-rw-r--r--Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration13
-rw-r--r--Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget13
-rw-r--r--Documentation/ABI/testing/sysfs-socinfo16
-rw-r--r--Documentation/DocBook/Makefile7
-rw-r--r--Documentation/DocBook/db5500_keypad.tmpl91
-rw-r--r--Documentation/DocBook/device-drivers.tmpl17
-rw-r--r--Documentation/DocBook/gpio.tmpl112
-rw-r--r--Documentation/DocBook/i2c.tmpl116
-rw-r--r--Documentation/DocBook/prcmu-fw-api.tmpl109
-rw-r--r--Documentation/DocBook/ste_ff_vibra.tmpl217
-rw-r--r--Documentation/DocBook/stmpe.tmpl115
-rwxr-xr-x[-rw-r--r--]Documentation/DocBook/stylesheet.xsl28
-rw-r--r--Documentation/DocBook/synaptics_rmi4_touchp.tmpl106
-rw-r--r--Documentation/DocBook/tc_keypad.tmpl113
-rw-r--r--Documentation/DocBook/touchp.tmpl104
-rw-r--r--Documentation/DocBook/ux500_usb.tmpl151
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/trace/stm-trace.txt193
-rw-r--r--Makefile6
-rw-r--r--arch/arm/Kconfig15
-rw-r--r--arch/arm/Kconfig.debug9
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/boottime.c46
-rwxr-xr-x[-rw-r--r--]arch/arm/configs/u8500_defconfig260
-rw-r--r--arch/arm/include/asm/cacheflush.h15
-rw-r--r--arch/arm/include/asm/delay.h11
-rw-r--r--arch/arm/include/asm/elf.h4
-rw-r--r--arch/arm/include/asm/io.h6
-rw-r--r--arch/arm/include/asm/outercache.h14
-rw-r--r--arch/arm/include/asm/setup.h21
-rw-r--r--arch/arm/include/asm/smp_twd.h8
-rw-r--r--arch/arm/include/asm/system.h2
-rw-r--r--arch/arm/kernel/armksyms.c4
-rw-r--r--arch/arm/kernel/elf.c6
-rw-r--r--arch/arm/kernel/machine_kexec.c11
-rw-r--r--arch/arm/kernel/process.c11
-rw-r--r--arch/arm/kernel/return_address.c4
-rw-r--r--arch/arm/kernel/smp.c4
-rw-r--r--arch/arm/kernel/smp_twd.c114
-rw-r--r--arch/arm/lib/delay.S69
-rw-r--r--arch/arm/lib/delay.c81
-rw-r--r--arch/arm/mach-ux500/Kconfig87
-rw-r--r--arch/arm/mach-ux500/Kconfig-arch142
-rw-r--r--arch/arm/mach-ux500/Makefile67
-rw-r--r--arch/arm/mach-ux500/board-mop500-bm.c422
-rw-r--r--arch/arm/mach-ux500/board-mop500-bm.h24
-rwxr-xr-xarch/arm/mach-ux500/board-mop500-cyttsp.c226
-rw-r--r--arch/arm/mach-ux500/board-mop500-mcde.c693
-rw-r--r--arch/arm/mach-ux500/board-mop500-mcde.h24
-rw-r--r--arch/arm/mach-ux500/board-mop500-pins.c863
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c308
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h6
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c126
-rw-r--r--arch/arm/mach-ux500/board-mop500-stm.c203
-rw-r--r--arch/arm/mach-ux500/board-mop500-stuib.c119
-rw-r--r--arch/arm/mach-ux500/board-mop500-u8500uib.c100
-rw-r--r--arch/arm/mach-ux500/board-mop500-uib.c81
-rw-r--r--arch/arm/mach-ux500/board-mop500.c880
-rw-r--r--arch/arm/mach-ux500/board-mop500.h62
-rw-r--r--arch/arm/mach-ux500/board-pins-sleep-force.c267
-rw-r--r--arch/arm/mach-ux500/board-pins-sleep-force.h38
-rw-r--r--arch/arm/mach-ux500/board-u5500-bm.c495
-rw-r--r--arch/arm/mach-ux500/board-u5500-bm.h26
-rwxr-xr-xarch/arm/mach-ux500/board-u5500-cyttsp.c145
-rw-r--r--arch/arm/mach-ux500/board-u5500-mcde.c431
-rw-r--r--arch/arm/mach-ux500/board-u5500-pins.c193
-rw-r--r--arch/arm/mach-ux500/board-u5500-regulators.c182
-rw-r--r--arch/arm/mach-ux500/board-u5500-sdi.c237
-rw-r--r--arch/arm/mach-ux500/board-u5500.c604
-rw-r--r--arch/arm/mach-ux500/board-u5500.h30
-rw-r--r--arch/arm/mach-ux500/board-ux500-usb.h13
-rw-r--r--arch/arm/mach-ux500/clock-db5500.c715
-rw-r--r--arch/arm/mach-ux500/clock-db8500.c1056
-rw-r--r--arch/arm/mach-ux500/clock-debug.c237
-rw-r--r--arch/arm/mach-ux500/clock.c997
-rw-r--r--arch/arm/mach-ux500/clock.h259
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c101
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c163
-rw-r--r--arch/arm/mach-ux500/cpu.c17
-rw-r--r--arch/arm/mach-ux500/dcache.c254
-rw-r--r--arch/arm/mach-ux500/devices-common.c94
-rw-r--r--arch/arm/mach-ux500/devices-common.h33
-rw-r--r--arch/arm/mach-ux500/devices-db5500.c280
-rw-r--r--arch/arm/mach-ux500/devices-db5500.h45
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c520
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h5
-rw-r--r--arch/arm/mach-ux500/devices.c45
-rw-r--r--arch/arm/mach-ux500/dma-db5500.c139
-rw-r--r--arch/arm/mach-ux500/dma-db8500.c312
-rw-r--r--arch/arm/mach-ux500/hotplug.c32
-rw-r--r--arch/arm/mach-ux500/hwmem-int.c165
-rw-r--r--arch/arm/mach-ux500/hwreg.c651
-rw-r--r--arch/arm/mach-ux500/include/mach/ab8500_gpadc.h36
-rw-r--r--arch/arm/mach-ux500/include/mach/abx500-accdet.h353
-rw-r--r--arch/arm/mach-ux500/include/mach/context.h86
-rw-r--r--arch/arm/mach-ux500/include/mach/crypto-ux500.h19
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-keypad.h42
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h3
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h15
-rw-r--r--arch/arm/mach-ux500/include/mach/dcache.h26
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h21
-rw-r--r--arch/arm/mach-ux500/include/mach/gpio.h15
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h45
-rw-r--r--arch/arm/mach-ux500/include/mach/hcl_defs.h252
-rw-r--r--arch/arm/mach-ux500/include/mach/id.h47
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-mop500.h4
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-u5500.h11
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db5500.h30
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h29
-rw-r--r--arch/arm/mach-ux500/include/mach/pm-timer.h30
-rw-r--r--arch/arm/mach-ux500/include/mach/pm.h121
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu-debug.h23
-rw-r--r--arch/arm/mach-ux500/include/mach/reboot_reasons.h47
-rw-r--r--arch/arm/mach-ux500/include/mach/sensors1p.h24
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/ste-dma40-db5500.h (renamed from arch/arm/mach-ux500/ste-dma40-db5500.h)12
-rw-r--r--arch/arm/mach-ux500/include/mach/ste-dma40-db8500.h (renamed from arch/arm/mach-ux500/ste-dma40-db8500.h)21
-rw-r--r--arch/arm/mach-ux500/include/mach/suspend.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/system.h8
-rw-r--r--arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h48
-rw-r--r--arch/arm/mach-ux500/include/mach/timex.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/usb.h10
-rw-r--r--arch/arm/mach-ux500/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-ux500/l2x0-prefetch.c160
-rw-r--r--arch/arm/mach-ux500/mbox-db5500.c565
-rw-r--r--arch/arm/mach-ux500/pins-db8500.h72
-rw-r--r--arch/arm/mach-ux500/pins.c214
-rw-r--r--arch/arm/mach-ux500/pins.h45
-rw-r--r--arch/arm/mach-ux500/pm/Kconfig68
-rw-r--r--arch/arm/mach-ux500/pm/Makefile12
-rw-r--r--arch/arm/mach-ux500/pm/context-db5500.c404
-rw-r--r--arch/arm/mach-ux500/pm/context-db8500.c456
-rw-r--r--arch/arm/mach-ux500/pm/context.c978
-rw-r--r--arch/arm/mach-ux500/pm/context_arm.S385
-rw-r--r--arch/arm/mach-ux500/pm/performance.c225
-rw-r--r--arch/arm/mach-ux500/pm/pm.c231
-rw-r--r--arch/arm/mach-ux500/pm/prcmu-qos-power.c702
-rw-r--r--arch/arm/mach-ux500/pm/runtime.c355
-rw-r--r--arch/arm/mach-ux500/pm/scu.h25
-rw-r--r--arch/arm/mach-ux500/pm/suspend.c242
-rw-r--r--arch/arm/mach-ux500/pm/suspend_dbg.c163
-rw-r--r--arch/arm/mach-ux500/pm/suspend_dbg.h63
-rw-r--r--arch/arm/mach-ux500/pm/timer.c189
-rw-r--r--arch/arm/mach-ux500/pm/usecase_gov.c943
-rw-r--r--arch/arm/mach-ux500/prcmu-debug.c555
-rw-r--r--arch/arm/mach-ux500/product.c107
-rw-r--r--arch/arm/mach-ux500/product.h26
-rw-r--r--arch/arm/mach-ux500/reboot_reasons.c77
-rw-r--r--arch/arm/mach-ux500/regulator-u5500.h20
-rw-r--r--arch/arm/mach-ux500/sensors1p.c298
-rw-r--r--arch/arm/mach-ux500/tee_service_svp.c66
-rw-r--r--arch/arm/mach-ux500/tee_ta_start_modem_svp.c56
-rw-r--r--arch/arm/mach-ux500/tee_ux500.c95
-rw-r--r--arch/arm/mach-ux500/timer.c19
-rw-r--r--arch/arm/mach-ux500/uart-db8500.c225
-rw-r--r--arch/arm/mach-ux500/usb.c48
-rw-r--r--arch/arm/mm/cache-fa.S18
-rw-r--r--arch/arm/mm/cache-v3.S18
-rw-r--r--arch/arm/mm/cache-v4.S18
-rw-r--r--arch/arm/mm/cache-v4wb.S18
-rw-r--r--arch/arm/mm/cache-v4wt.S18
-rw-r--r--arch/arm/mm/cache-v6.S18
-rw-r--r--arch/arm/mm/cache-v7.S94
-rw-r--r--arch/arm/mm/mmu.c14
-rw-r--r--arch/arm/mm/proc-macros.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/plat-nomadik/include/plat/mtu.h2
-rw-r--r--arch/arm/plat-nomadik/include/plat/pincfg.h19
-rw-r--r--arch/arm/plat-nomadik/include/plat/ske.h9
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h8
-rw-r--r--arch/arm/plat-nomadik/timer.c32
-rw-r--r--drivers/Kconfig3
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/amba/bus.c43
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/soc.c79
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c23
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/cpufreq.c21
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c173
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c303
-rw-r--r--drivers/crypto/Kconfig11
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/ux500/Kconfig29
-rw-r--r--drivers/crypto/ux500/Makefile8
-rw-r--r--drivers/crypto/ux500/cryp/Makefile13
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c418
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h308
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c2313
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.c45
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.h31
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irqp.h125
-rw-r--r--drivers/crypto/ux500/cryp/cryp_p.h124
-rw-r--r--drivers/crypto/ux500/hash/Makefile11
-rw-r--r--drivers/crypto/ux500/hash/hash_alg.h353
-rwxr-xr-xdrivers/crypto/ux500/hash/hash_alg_p.h26
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c1814
-rw-r--r--drivers/dma/ste_dma40.c411
-rw-r--r--drivers/dma/ste_dma40_ll.c26
-rw-r--r--drivers/dma/ste_dma40_ll.h11
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-ab8500.c164
-rw-r--r--drivers/gpio/gpio-nomadik.c110
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/mali/Kconfig15
-rw-r--r--drivers/gpu/mali/Makefile10
-rw-r--r--drivers/gpu/mali/mali400ko/.gitignore32
-rw-r--r--drivers/gpu/mali/mali400ko/Makefile102
-rw-r--r--drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h236
-rw-r--r--drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h31
-rw-r--r--drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile346
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common56
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform45
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h87
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h94
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h87
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h79
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h93
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h107
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h121
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h109
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c370
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h18
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c1418
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c1187
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h171
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c892
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h134
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c183
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h99
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h21
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c515
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h25
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h17
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c1425
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c2924
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h66
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c309
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h37
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c348
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h145
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h21
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c240
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h46
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c2032
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h355
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h107
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c207
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h44
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c29
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h1620
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h166
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h184
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h252
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h1148
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h710
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c921
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h323
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c243
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h155
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c81
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h62
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c461
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h80
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c718
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h296
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h66
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h31
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c82
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h77
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c565
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h29
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c786
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c72
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h57
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h37
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c55
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c86
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c228
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c271
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c578
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c98
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c22
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c50
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c51
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c191
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c195
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h32
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c51
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c65
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c142
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c128
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c336
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c103
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c135
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c41
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h70
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c50
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c61
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c388
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h100
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c190
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c159
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt28
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h170
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h219
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h26
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile115
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common20
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h18
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c329
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c387
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h126
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c166
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h91
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h49
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c194
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h35
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h141
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h51
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h31
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h49
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c409
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h18
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c273
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h23
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c245
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h23
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c70
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c27
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c243
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c37
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c76
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h35
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c173
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h41
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt17
-rw-r--r--drivers/gpu/mali/mali400ko/mali.spec57
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt24
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile17
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c155
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h25
-rw-r--r--drivers/hwmon/Kconfig38
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/ab5500.c207
-rw-r--r--drivers/hwmon/ab8500.c184
-rw-r--r--drivers/hwmon/abx500.c698
-rw-r--r--drivers/hwmon/abx500.h95
-rw-r--r--drivers/hwmon/dbx500.c402
-rw-r--r--drivers/hwmon/hwmon.c21
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c7
-rw-r--r--drivers/input/keyboard/Kconfig12
-rw-r--r--drivers/input/keyboard/Makefile3
-rw-r--r--drivers/input/keyboard/db5500_keypad.c790
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c645
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c89
-rw-r--r--drivers/input/misc/Kconfig32
-rw-r--r--drivers/input/misc/Makefile3
-rw-r--r--drivers/input/misc/ab5500-accdet.c274
-rw-r--r--drivers/input/misc/ab8500-accdet.c408
-rw-r--r--drivers/input/misc/ab8500-ponkey.c214
-rw-r--r--drivers/input/misc/abx500-accdet.c951
-rw-r--r--drivers/input/misc/ste_ff_vibra.c234
-rw-r--r--drivers/input/touchscreen/Kconfig17
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c499
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_core.c2221
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_core.h44
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_ldr.h333
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_spi.c302
-rw-r--r--drivers/input/touchscreen/synaptics_i2c_rmi.c675
-rw-r--r--drivers/leds/Kconfig8
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-ab5500.c811
-rw-r--r--drivers/leds/leds-pwm.c8
-rw-r--r--drivers/mfd/Kconfig46
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab5500-core.c80
-rw-r--r--drivers/mfd/ab5500-gpadc.c1180
-rw-r--r--drivers/mfd/ab5500-power.c96
-rw-r--r--drivers/mfd/ab8500-core.c7
-rw-r--r--drivers/mfd/ab8500-debugfs.c1056
-rw-r--r--drivers/mfd/ab8500-denc.c538
-rw-r--r--drivers/mfd/ab8500-gpadc.c12
-rw-r--r--drivers/mfd/ab8500-i2c.c2
-rw-r--r--drivers/mfd/ab8500-sysctrl.c108
-rw-r--r--drivers/mfd/db5500-prcmu-regs.h126
-rw-r--r--drivers/mfd/db5500-prcmu.c1711
-rw-r--r--drivers/mfd/db8500-prcmu.c457
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h98
-rw-r--r--drivers/mfd/stmpe.c18
-rw-r--r--drivers/mfd/tc35892.c503
-rw-r--r--drivers/mfd/tc3589x.c131
-rw-r--r--drivers/mfd/tps6105x.c1
-rw-r--r--drivers/misc/Kconfig18
-rw-r--r--drivers/misc/Kconfig.stm114
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ab8500-pwm.c45
-rw-r--r--drivers/misc/bh1780gli.c186
-rw-r--r--drivers/misc/dispdev/Makefile1
-rw-r--r--drivers/misc/dispdev/dispdev.c658
-rw-r--r--drivers/misc/hwmem/Makefile3
-rw-r--r--drivers/misc/hwmem/cache_handler.c510
-rw-r--r--drivers/misc/hwmem/cache_handler.h61
-rw-r--r--drivers/misc/hwmem/contig_alloc.c571
-rw-r--r--drivers/misc/hwmem/hwmem-ioctl.c532
-rw-r--r--drivers/misc/hwmem/hwmem-main.c726
-rw-r--r--drivers/misc/stm.c797
-rw-r--r--drivers/mmc/card/block.c108
-rw-r--r--drivers/mmc/core/mmc.c14
-rw-r--r--drivers/mmc/host/mmci.c185
-rw-r--r--drivers/mmc/host/mmci.h19
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c112
-rw-r--r--drivers/power/Kconfig26
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ab5500_btemp.c889
-rw-r--r--drivers/power/ab5500_charger.c1814
-rw-r--r--drivers/power/ab5500_fg.c1838
-rw-r--r--drivers/power/ab8500_btemp.c1126
-rw-r--r--drivers/power/ab8500_chargalg.c1961
-rw-r--r--drivers/power/ab8500_charger.c2698
-rw-r--r--drivers/power/ab8500_fg.c2316
-rw-r--r--drivers/power/abx500_chargalg.c1920
-rw-r--r--drivers/regulator/Kconfig35
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/ab5500.c586
-rw-r--r--drivers/regulator/ab8500-debug.c1853
-rw-r--r--drivers/regulator/ab8500-ext.c293
-rw-r--r--drivers/regulator/ab8500.c512
-rw-r--r--drivers/regulator/core.c48
-rw-r--r--drivers/regulator/db5500-prcmu.c330
-rw-r--r--drivers/regulator/db8500-prcmu.c102
-rw-r--r--drivers/regulator/dbx500-prcmu.c282
-rw-r--r--drivers/regulator/dbx500-prcmu.h56
-rw-r--r--drivers/rtc/Kconfig7
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ab.c483
-rw-r--r--drivers/rtc/rtc-ab8500.c132
-rw-r--r--drivers/spi/spi-pl022.c22
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c313
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h1
-rw-r--r--drivers/tee/Kconfig13
-rw-r--r--drivers/tee/Makefile8
-rw-r--r--drivers/tee/tee_driver.c643
-rw-r--r--drivers/tee/tee_service.c17
-rw-r--r--drivers/tty/serial/Kconfig8
-rw-r--r--drivers/tty/serial/amba-pl011.c444
-rw-r--r--drivers/usb/core/hub.c39
-rw-r--r--drivers/usb/core/notify.c7
-rw-r--r--drivers/usb/core/usb.h4
-rw-r--r--drivers/usb/gadget/f_ecm.c24
-rw-r--r--drivers/usb/gadget/f_mass_storage.c27
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/musb/Kconfig17
-rw-r--r--drivers/usb/musb/musb_core.c11
-rw-r--r--drivers/usb/musb/musb_host.c83
-rw-r--r--drivers/usb/musb/ux500.c382
-rw-r--r--drivers/usb/musb/ux500_dma.c97
-rw-r--r--drivers/usb/otg/Kconfig9
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/ab5500-usb.c730
-rw-r--r--drivers/usb/otg/ab8500-usb.c713
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/av8100/Kconfig48
-rw-r--r--drivers/video/av8100/Makefile10
-rw-r--r--drivers/video/av8100/av8100.c4162
-rw-r--r--drivers/video/av8100/av8100_regs.h346
-rw-r--r--drivers/video/av8100/hdmi.c2479
-rw-r--r--drivers/video/av8100/hdmi_loc.h75
-rw-r--r--drivers/video/b2r2/Kconfig134
-rw-r--r--drivers/video/b2r2/Makefile15
-rw-r--r--drivers/video/b2r2/b2r2_blt_main.c3322
-rw-r--r--drivers/video/b2r2/b2r2_core.c2666
-rw-r--r--drivers/video/b2r2/b2r2_core.h222
-rw-r--r--drivers/video/b2r2/b2r2_debug.c343
-rw-r--r--drivers/video/b2r2/b2r2_debug.h104
-rw-r--r--drivers/video/b2r2/b2r2_filters.c372
-rw-r--r--drivers/video/b2r2/b2r2_filters.h71
-rw-r--r--drivers/video/b2r2/b2r2_generic.c3303
-rw-r--r--drivers/video/b2r2/b2r2_generic.h51
-rw-r--r--drivers/video/b2r2/b2r2_global.h119
-rw-r--r--drivers/video/b2r2/b2r2_hw.h707
-rw-r--r--drivers/video/b2r2/b2r2_input_validation.c422
-rw-r--r--drivers/video/b2r2/b2r2_input_validation.h28
-rw-r--r--drivers/video/b2r2/b2r2_internal.h360
-rw-r--r--drivers/video/b2r2/b2r2_kernel_if.c37
-rw-r--r--drivers/video/b2r2/b2r2_mem_alloc.c726
-rw-r--r--drivers/video/b2r2/b2r2_mem_alloc.h140
-rw-r--r--drivers/video/b2r2/b2r2_node_gen.c80
-rw-r--r--drivers/video/b2r2/b2r2_node_split.c3513
-rw-r--r--drivers/video/b2r2/b2r2_node_split.h121
-rw-r--r--drivers/video/b2r2/b2r2_profiler/Makefile3
-rw-r--r--drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c258
-rw-r--r--drivers/video/b2r2/b2r2_profiler_api.h66
-rw-r--r--drivers/video/b2r2/b2r2_profiler_socket.c105
-rw-r--r--drivers/video/b2r2/b2r2_profiler_socket.h22
-rw-r--r--drivers/video/b2r2/b2r2_structures.h226
-rw-r--r--drivers/video/b2r2/b2r2_timing.c22
-rw-r--r--drivers/video/b2r2/b2r2_timing.h22
-rw-r--r--drivers/video/b2r2/b2r2_utils.c391
-rw-r--r--drivers/video/b2r2/b2r2_utils.h51
-rw-r--r--drivers/video/mcde/Kconfig96
-rw-r--r--drivers/video/mcde/Makefile23
-rw-r--r--drivers/video/mcde/display-ab8500.c493
-rw-r--r--drivers/video/mcde/display-av8100.c1577
-rw-r--r--drivers/video/mcde/display-fictive.c63
-rw-r--r--drivers/video/mcde/display-generic_dsi.c309
-rw-r--r--drivers/video/mcde/display-samsung_s6d16d0.c218
-rw-r--r--drivers/video/mcde/display-sony_acx424akp_dsi.c401
-rw-r--r--drivers/video/mcde/display-vuib500-dpi.c215
-rw-r--r--drivers/video/mcde/dsilink_regs.h2037
-rw-r--r--drivers/video/mcde/mcde_bus.c274
-rw-r--r--drivers/video/mcde/mcde_debugfs.c207
-rw-r--r--drivers/video/mcde/mcde_debugfs.h25
-rw-r--r--drivers/video/mcde/mcde_display.c395
-rw-r--r--drivers/video/mcde/mcde_dss.c476
-rw-r--r--drivers/video/mcde/mcde_fb.c898
-rw-r--r--drivers/video/mcde/mcde_hw.c3522
-rw-r--r--drivers/video/mcde/mcde_mod.c69
-rw-r--r--drivers/video/mcde/mcde_regs.h5315
-rw-r--r--drivers/watchdog/Kconfig16
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/mpcore_wdt.c101
-rw-r--r--drivers/watchdog/u8500_wdt.c433
-rw-r--r--firmware/Makefile1
-rw-r--r--firmware/av8100.fw.ihex1281
-rw-r--r--fs/mpage.c2
-rw-r--r--fs/partitions/Kconfig19
-rw-r--r--fs/partitions/Makefile1
-rwxr-xr-xfs/partitions/blkdev_parts.c127
-rwxr-xr-xfs/partitions/blkdev_parts.h14
-rw-r--r--fs/partitions/check.c4
-rw-r--r--include/Kbuild1
-rw-r--r--include/linux/amba/bus.h60
-rw-r--r--include/linux/amba/mmci.h22
-rw-r--r--include/linux/boottime.h89
-rw-r--r--include/linux/cpufreq-dbx500.h14
-rw-r--r--include/linux/cpufreq.h2
-rwxr-xr-xinclude/linux/cyttsp.h114
-rw-r--r--include/linux/dispdev.h66
-rw-r--r--include/linux/gpio/nomadik.h (renamed from arch/arm/plat-nomadik/include/plat/gpio-nomadik.h)1
-rw-r--r--include/linux/hwmem.h593
-rw-r--r--include/linux/hwmon.h5
-rw-r--r--include/linux/input/bu21013.h26
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/leds-ab5500.h35
-rw-r--r--include/linux/leds_pwm.h1
-rw-r--r--include/linux/mfd/ab8500.h36
-rw-r--r--include/linux/mfd/ab8500/bm.h514
-rw-r--r--include/linux/mfd/ab8500/denc-regs.h357
-rw-r--r--include/linux/mfd/ab8500/denc.h82
-rw-r--r--include/linux/mfd/ab8500/gpadc.h2
-rw-r--r--include/linux/mfd/ab8500/gpio.h58
-rw-r--r--include/linux/mfd/ab8500/ux500_chargalg.h38
-rw-r--r--include/linux/mfd/abx500.h8
-rw-r--r--include/linux/mfd/abx500/ab5500-bm.h114
-rw-r--r--include/linux/mfd/abx500/ab5500-gpadc.h69
-rw-r--r--include/linux/mfd/abx500/ab5500.h167
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h38
-rw-r--r--include/linux/mfd/db5500-prcmu.h80
-rw-r--r--include/linux/mfd/db8500-prcmu.h46
-rw-r--r--include/linux/mfd/dbx500-prcmu.h185
-rw-r--r--include/linux/mfd/stmpe.h2
-rw-r--r--include/linux/mfd/tc35892.h146
-rw-r--r--include/linux/mfd/tc3589x.h61
-rw-r--r--include/linux/mmc/card.h14
-rw-r--r--include/linux/mmc/mmc.h6
-rw-r--r--include/linux/regulator/ab5500.h28
-rw-r--r--include/linux/regulator/ab8500-debug.h21
-rw-r--r--include/linux/regulator/ab8500.h50
-rw-r--r--include/linux/regulator/db5500-prcmu.h27
-rw-r--r--include/linux/regulator/dbx500-prcmu.h92
-rw-r--r--include/linux/sys_soc.h50
-rw-r--r--include/linux/tee.h312
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/trace/Kbuild1
-rw-r--r--include/trace/stm.h223
-rw-r--r--include/video/av8100.h549
-rw-r--r--include/video/b2r2_blt.h617
-rw-r--r--include/video/hdmi.h205
-rw-r--r--include/video/mcde.h448
-rw-r--r--include/video/mcde_display-ab8500.h24
-rw-r--r--include/video/mcde_display-av8100.h53
-rw-r--r--include/video/mcde_display-generic_dsi.h35
-rw-r--r--include/video/mcde_display-sony_acx424akp_dsi.h26
-rw-r--r--include/video/mcde_display-vuib500-dpi.h31
-rw-r--r--include/video/mcde_display.h149
-rw-r--r--include/video/mcde_dss.h81
-rw-r--r--include/video/mcde_fb.h65
-rw-r--r--init/Kconfig9
-rw-r--r--init/Makefile1
-rw-r--r--init/boottime.c467
-rw-r--r--init/main.c6
-rw-r--r--kernel/irq/chip.c8
-rw-r--r--kernel/kexec.c12
-rw-r--r--kernel/printk.c12
-rw-r--r--kernel/trace/trace.c11
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_sched_switch.c10
-rwxr-xr-xscripts/setlocalversion42
611 files changed, 155445 insertions, 3939 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
new file mode 100644
index 00000000000..ae217d4a3a6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
@@ -0,0 +1,13 @@
+What: Attribute for calibrating ST-Ericsson AB8500 Real Time Clock
+Date: Oct 2011
+KernelVersion: 3.0
+Contact: Mark Godfrey <mark.godfrey@stericsson.com>
+Description: The rtc_calibration attribute allows the userspace to
+ calibrate the AB8500's 32KHz Real Time Clock.
+ Every 60 seconds the AB8500 will correct the RTC's value
+ by adding to it the value of this attribute.
+ The range of the attribute is -127 to +127 in units of
+ 30.5 microseconds (half a part per million of the 32KHz clock).
+Users: The /vendor/st-ericsson/base_utilities/core/rtc_calibration
+ daemon uses this interface.
+
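For illustration, a minimal user-space sketch of applying a calibration value through this attribute; it assumes the attribute is exposed at /sys/class/rtc/rtc0/device/rtc_calibration as described above, and the path, example range check and ppm figure in the comments are assumptions rather than part of the patch. Only standard C library calls are used.

/* Hypothetical example: write a calibration value (-127..+127) to the
 * AB8500 RTC calibration attribute. One unit adds 30.5 us to the RTC
 * every 60 seconds, i.e. roughly 0.5 ppm per unit. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/class/rtc/rtc0/device/rtc_calibration";
	int val = (argc > 1) ? atoi(argv[1]) : 0;
	FILE *f;

	if (val < -127 || val > 127) {
		fprintf(stderr, "value out of range (-127..+127)\n");
		return 1;
	}

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%d\n", val);
	fclose(f);
	return 0;
}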
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget b/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
index d548eaac230..3bf505e7227 100644
--- a/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
+++ b/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
@@ -19,3 +19,16 @@ Description:
Possible values are:
1 -> ignore the FUA flag
0 -> obey the FUA flag
+
+What: /sys/devices/platform/_UDC_/gadget/host_request
+Date: December 2010
+Contact: Pavan Kondeti <pkondeti@...>
+Description:
+ An OTG 2.0 compliant host keeps polling the OTG 2.0 peripheral
+ for a host role request. Setting the host_request flag tells the
+ host to give up the host role to the peripheral.
+
+ 1 -> host role is requested
+ 0 -> no effect (automatically cleared upon reset/disconnect)
+
+ (_UDC_ is the name of the USB Device Controller driver)
diff --git a/Documentation/ABI/testing/sysfs-socinfo b/Documentation/ABI/testing/sysfs-socinfo
new file mode 100644
index 00000000000..afd9da2fa76
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-socinfo
@@ -0,0 +1,16 @@
+What: /sys/socinfo
+Date: March 2011
+Contact: Maxime Coquelin <maxime.coquelin-nonst@stericsson.com>
+Description:
+ The /sys/socinfo directory contains information about the
+ System-on-Chip. It is only available if the platform implements it.
+ This directory contains two kinds of attributes:
+ - common attributes:
+ * machine: the name of the machine.
+ * family: the family name of the SoC
+ - SoC-specific attributes: The SoC vendor can declare attributes
+ to export some strings to user-space, such as the serial
+ number.
+
+Users:
+ User-space applications which need these kinds of attributes.
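For illustration, a minimal sketch of how a user-space application might read the common attributes described above; it assumes they are present under /sys/socinfo on the running platform, and the helper name is hypothetical. Only standard C library calls are used.

/* Hypothetical example: print the common /sys/socinfo attributes. */
#include <stdio.h>

static void print_attr(const char *name)
{
	char path[64], buf[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/socinfo/%s", name);
	f = fopen(path, "r");
	if (!f)
		return;	/* attribute not implemented on this platform */
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", name, buf);
	fclose(f);
}

int main(void)
{
	print_attr("machine");
	print_attr("family");
	return 0;
}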
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 66725a3d30d..80c42603ff2 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,12 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
- tracepoint.xml drm.xml media_api.xml
+ tracepoint.xml drm.xml media_api.xml \
+ i2s.xml msp.xml shrm.xml stmpe.xml touchp.xml \
+ tc_keypad.xml prcmu-fw-api.xml cg2900_fm_radio.xml \
+ synaptics_rmi4_touchp.xml db5500_keypad.xml \
+ u5500_LogicalMailbox.xml cg2900.xml \
+ lsm303dlh.xml ste_ff_vibra.xml ux500_usb.xml
include $(srctree)/Documentation/DocBook/media/Makefile
diff --git a/Documentation/DocBook/db5500_keypad.tmpl b/Documentation/DocBook/db5500_keypad.tmpl
new file mode 100644
index 00000000000..a25fc990516
--- /dev/null
+++ b/Documentation/DocBook/db5500_keypad.tmpl
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="keypad-API-Guide">
+ <bookinfo>
+ <title>DB5500 Keypad</title>
+
+ <authorgroup>
+ <author>
+ <firstname>NaveenKumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ License terms: GNU General Public License (GPL) version 2.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the keypad
+ driver for the internal keypad.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This db5500-keypad driver doesn't export any functions.
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the
+ structures which are used in the keypad driver.
+ </para>
+!Iarch/arm/mach-ux500/include/mach/db5500-keypad.h
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the
+ internal functions.
+ </para>
+!Idrivers/input/keyboard/db5500_keypad.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index b638e50cf8f..5f70f734e8b 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -437,4 +437,21 @@ X!Idrivers/video/console/fonts.c
!Edrivers/i2c/i2c-core.c
</chapter>
+ <chapter id="hsi">
+ <title>High Speed Synchronous Serial Interface (HSI)</title>
+
+ <para>
+ High Speed Synchronous Serial Interface (HSI) is a
+ serial interface mainly used for connecting application
+ engines (APE) with cellular modem engines (CMT) in cellular
+ handsets.
+
+ HSI provides multiplexing for up to 16 logical channels,
+ low-latency and full duplex communication.
+ </para>
+
+!Iinclude/linux/hsi/hsi.h
+!Edrivers/hsi/hsi.c
+ </chapter>
+
</book>
diff --git a/Documentation/DocBook/gpio.tmpl b/Documentation/DocBook/gpio.tmpl
new file mode 100644
index 00000000000..b69c2770210
--- /dev/null
+++ b/Documentation/DocBook/gpio.tmpl
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="GPIO">
+ <bookinfo>
+ <title>GPIO1B</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Alessandro</firstname>
+ <surname>Rubini</surname>
+ <affiliation>
+ <address>
+ <email>rubini@unipv.it</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Prafulla</firstname>
+ <surname>WADASKAR</surname>
+ <affiliation>
+ <address>
+ <email>prafulla.wadaskar@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2008-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the APIs provided by the GPIO controller driver.
+ </para>
+ <para>
+ Only the API specific to the Ux500 platform is listed here. For the generic GPIO
+ API, see <filename>Documentation/gpio.txt</filename> in the kernel source tree.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title> Public Interface </title>
+ <para>
+ This section lists the APIs provided by the GPIO controller driver to client drivers.
+ </para>
+ <para>
+ Only the API specific to the Ux500 platform is listed here. For the generic GPIO
+ API, see <filename>Documentation/gpio.txt</filename> in the kernel source tree.
+ </para>
+!Earch/arm/plat-nomadik/gpio.c
+ </chapter>
+</book>
diff --git a/Documentation/DocBook/i2c.tmpl b/Documentation/DocBook/i2c.tmpl
new file mode 100644
index 00000000000..8a4cb49204e
--- /dev/null
+++ b/Documentation/DocBook/i2c.tmpl
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="I2C">
+ <bookinfo>
+ <title>I2C</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Srinidhi</firstname>
+ <surname>Kasagar</surname>
+ <affiliation>
+ <address>
+ <email>srinidhi.kasagar@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Sachin</firstname>
+ <surname>Verma</surname>
+ <affiliation>
+ <address>
+ <email>sachin.verma@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the APIs provided by the I2C controller driver.
+ Since this driver registers its transfer function with the kernel framework,
+ there are only private functions in this I2C bus driver. The driver currently
+ works only in master mode and supports 7-bit addressing only; there is no
+ support for 10-bit addressing. The driver currently supports standard-mode
+ (100 kHz) and fast-mode (400 kHz) operation.
+ </para>
+ </chapter>
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable
+ </para>
+ </chapter>
+
+ <chapter id="private">
+ <title>Private Functions</title>
+ <para>
+ This section lists the functions used internally by the I2C controller driver.
+ </para>
+!Idrivers/i2c/busses/i2c-nomadik.c
+ </chapter>
+
+</book>
diff --git a/Documentation/DocBook/prcmu-fw-api.tmpl b/Documentation/DocBook/prcmu-fw-api.tmpl
new file mode 100644
index 00000000000..445a277933c
--- /dev/null
+++ b/Documentation/DocBook/prcmu-fw-api.tmpl
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STw4500">
+ <bookinfo>
+ <title>PRCMU Driver</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Sudeep Karkada</firstname>
+ <surname>Nagesha</surname>
+ <affiliation>
+ <address>
+ <email>sudeepkarkada.nagesha@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the PRCMU firmware interface driver.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="enum">
+ <title>Enumerations</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures
+ and enumerations which are used in the PRCMU firmware interface driver.
+ They are also required by the client drivers.
+ </para>
+!Iinclude/linux/mfd/dbx500-prcmu.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the kernel
+ API functions which are exported to the client drivers.
+ </para>
+!Edrivers/mfd/db8500-prcmu.c
+ </chapter>
+
+
+ </book>
diff --git a/Documentation/DocBook/ste_ff_vibra.tmpl b/Documentation/DocBook/ste_ff_vibra.tmpl
new file mode 100644
index 00000000000..cf5f159bed0
--- /dev/null
+++ b/Documentation/DocBook/ste_ff_vibra.tmpl
@@ -0,0 +1,217 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STE-Force-Feedback-Vibrator-API-Guide">
+ <bookinfo>
+ <title>Force Feedback Vibrator Driver</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Marcin</firstname>
+ <surname>Mielczarczyk</surname>
+ <affiliation>
+ <address>
+ <email>marcin.mielczarczyk@tieto.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+ <toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the implementation of ST-Ericsson's
+ Force Feedback Vibrator driver for the ST-Ericsson Linux platforms.
+ </para>
+ </chapter>
+
+ <chapter id="gettingstarted">
+ <title>Getting Started</title>
+ <para>
+ There are no special compilation flags needed to build the
+ Force Feedback Vibrator driver.
+ </para>
+
+ <section id="basic-tutorial">
+ <title>Basic Tutorial</title>
+ <para>
+ To enable the Force Feedback Vibrator driver using Kconfig, go to
+ <constant> Device Drivers -&gt; Input Device Support -&gt; Miscellaneous devices </constant>
+ and enable the following:
+ <itemizedlist>
+ <listitem><para>ST-Ericsson Force Feedback Vibrator</para></listitem>
+ </itemizedlist>
+ </para>
+ </section>
+
+ </chapter>
+
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <para>
+ The vibrator driver registers as a memless force feedback input device.
+ </para>
+ </chapter>
+
+ <chapter id="driver-configuration">
+ <title>Driver Configuration and Interaction</title>
+ <para>
+ There are no configuration parameters for the Force Feedback Vibrator driver.
+ </para>
+ <section id="driver-implemented-operations">
+ <title>Implemented operations in driver</title>
+ <para>
+ All available operations are provided by the memless input device class driver.
+ </para>
+ <para>
+ <table>
+ <title> Supported device driver operations </title>
+ <tgroup cols="2"><tbody>
+ <row><entry> open </entry> <entry> Calls the ste_ff_vibra_open() function, which initializes the workqueue </entry> </row>
+ <row><entry> close </entry> <entry> Calls the ste_ff_vibra_close() function, which cancels and destroys the workqueue </entry> </row>
+ </tbody></tgroup>
+ </table>
+ </para>
+ </section>
+ <section id="driver-loading">
+ <title>Driver loading parameters</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+ <section id="driver-ioctl">
+ <title>Driver IO Control</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+
+ <section id="driver-sysfs">
+ <title>Driver Interaction with Sysfs</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+ <section id="driver-proc">
+ <title>Driver Interaction using /proc filesystem</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+
+ <section id="driver-other">
+ <title>Other means for Driver Interaction</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+
+ <section id="driver-node">
+ <title>Driver Node File</title>
+ <para>
+ The Force Feedback Vibrator driver provides the following node files:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>eventX - Force Feedback Vibrator node file</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/input/eventX</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>Node file of the Force Feedback Vibrator driver</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+ </section>
+
+
+ </chapter>
+
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None.</term>
+ <listitem>
+ <para>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </para>
+ </chapter>
+
+<chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+
+</chapter>
+
+<chapter id="internal-functions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Edrivers/input/misc/ste_ff_vibra.c
+</chapter>
+
+</book>
diff --git a/Documentation/DocBook/stmpe.tmpl b/Documentation/DocBook/stmpe.tmpl
new file mode 100644
index 00000000000..9e64a00f6b3
--- /dev/null
+++ b/Documentation/DocBook/stmpe.tmpl
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STMPE MFD devices">
+ <bookinfo>
+ <title>STMPE IO-Port Expander guide</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Rabin</firstname>
+ <surname>Vincent</surname>
+ <affiliation>
+ <address>
+ <email>rabin.vincent@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the driver for STMicroelectronics
+ STMPExxxx port expander devices.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None.</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ List of public interfaces in stmpe driver
+ </para>
+!Edrivers/mfd/stmpe.c
+ </chapter>
+
+ <chapter id="private">
+ <title>Private Functions</title>
+ <para>
+	This chapter covers the STMPE Keypad driver and the STMPE GPIO driver.
+ </para>
+ <section id="stmpe-keypad.c">
+ <title>stmpe-keypad.c</title>
+!Idrivers/input/keyboard/stmpe-keypad.c
+ </section>
+ </chapter>
+
+ <chapter id="Other">
+ <title>Other Data Structures</title>
+ <para>
+	This section lists some of the data structures used by the stmpe driver and its client drivers.
+ </para>
+!Iinclude/linux/mfd/stmpe.h
+!Idrivers/mfd/stmpe.h
+</chapter>
+</book>
diff --git a/Documentation/DocBook/stylesheet.xsl b/Documentation/DocBook/stylesheet.xsl
index 85b25275196..b2769ce5c8f 100644..100755
--- a/Documentation/DocBook/stylesheet.xsl
+++ b/Documentation/DocBook/stylesheet.xsl
@@ -1,10 +1,18 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<param name="chunk.quietly">1</param>
-<param name="funcsynopsis.style">ansi</param>
-<param name="funcsynopsis.tabular.threshold">80</param>
-<param name="callout.graphics">0</param>
-<!-- <param name="paper.type">A4</param> -->
-<param name="generate.section.toc.level">2</param>
-<param name="use.id.as.filename">1</param>
-</stylesheet>
+<?xml version='1.0'?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ version="1.0">
+ <xsl:param name="use.id.as.filename" select="'1'"/>
+ <xsl:param name="admon.graphics" select="'1'"/>
+ <xsl:param name="admon.graphics.path"></xsl:param>
+ <xsl:param name="chunk.section.depth" select="2"></xsl:param>
+ <xsl:param name="chunk.quietly">1</xsl:param>
+ <xsl:param name="html.stylesheet"
+ select="'style.css'"/>
+ <xsl:param name="section.autolabel" select="1"/>
+ <xsl:param name="table.section.depth" select="1"/>
+ <xsl:param name="toc.section.depth" select="5"/>
+ <xsl:template name="user.header.content">
+ <link href="../style.css" title="walsh" rel="stylesheet" type="text/css"/>
+ </xsl:template>
+</xsl:stylesheet>
\ No newline at end of file
diff --git a/Documentation/DocBook/synaptics_rmi4_touchp.tmpl b/Documentation/DocBook/synaptics_rmi4_touchp.tmpl
new file mode 100644
index 00000000000..bc104eb4840
--- /dev/null
+++ b/Documentation/DocBook/synaptics_rmi4_touchp.tmpl
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="Synaptics RMI4 API Guide">
+ <bookinfo>
+ <title>Synaptics RMI4 Touch screen</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Naveen Kumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+     This documentation describes the functions provided by the
+     touch panel driver for the Synaptics RMI4 controller.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal
+     functions of the touch panel driver.
+ </para>
+!Idrivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/tc_keypad.tmpl b/Documentation/DocBook/tc_keypad.tmpl
new file mode 100644
index 00000000000..3f2630d9d6c
--- /dev/null
+++ b/Documentation/DocBook/tc_keypad.tmpl
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="TC35893">
+ <bookinfo>
+ <title>TC35893 Keypad</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Jayeeta</firstname>
+ <surname>Banerjee</surname>
+ <affiliation>
+ <address>
+ <email>jayeeta.banerjee@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+	This documentation describes the API provided by the keypad driver for the TC35893 controller.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the keypad driver.
+ </para>
+!Iinclude/linux/mfd/tc3589x.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Idrivers/input/keyboard/tc3589x-keypad.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/touchp.tmpl b/Documentation/DocBook/touchp.tmpl
new file mode 100644
index 00000000000..4301b23bfc0
--- /dev/null
+++ b/Documentation/DocBook/touchp.tmpl
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="bu21013_ts">
+ <bookinfo>
+ <title>Touch screen ROHM BU21013MWV</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Naveen Kumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+	This documentation describes the functions provided by the touch panel driver for the BU21013 controller.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+	This chapter contains the autogenerated documentation of the internal functions of the touch panel driver.
+ </para>
+!Idrivers/input/touchscreen/bu21013_ts.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/ux500_usb.tmpl b/Documentation/DocBook/ux500_usb.tmpl
new file mode 100644
index 00000000000..71b744386d4
--- /dev/null
+++ b/Documentation/DocBook/ux500_usb.tmpl
@@ -0,0 +1,151 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="USB-FUNCTION-Guide">
+ <bookinfo>
+ <title>USB Driver Function guide</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Praveena</firstname>
+ <surname>Nadahally</surname>
+ <affiliation>
+ <address>
+ <email>praveen.nadahally@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Rajaram</firstname>
+ <surname>Ragupathy</surname>
+ <affiliation>
+ <address>
+ <email>ragupathy.rajaram@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>SakethRam</firstname>
+ <surname>Bommisetti</surname>
+ <affiliation>
+ <address>
+ <email>sakethram.bommisetti@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2011</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Connectivity</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+	This documentation describes ST-Ericsson's adaptation of USB external DMA and the
+	communication between the Mentor USB IP controller and the USB transceiver.
+ </para>
+ </chapter>
+
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+	ST-Ericsson's USB driver uses the open source Linux gadget stack and the Mentor USB 2.0 IP driver. Since the USB transceiver and the Mentor USB IP controller are on different hardware, APIs are defined for the communication between them. These APIs are available in the ux500.c file.
+	The driver does not have an internal DMA dedicated to USB, so the external system DMA is used. The integration of the external DMA with the Mentor IP is available in the ux500_dma.c file.
+	Changes have been made in musb_core.c, where the endpoints are configured per platform, and DMA-specific changes have been integrated in musb_gadget.c.
+ <!-- TODO: A brief introduction about the concepts
+ which are introduced by the driver.
+ Remove this chapter completely if there are no
+ special concepts introduced by this driver.
+ Do NOT change the chapter id or title! -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ <variablelist>
+
+ <varlistentry>
+ <term>None.</term>
+ <listitem>
+ <para>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </para>
+ </chapter>
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ The musb driver doesn't export any functions.
+ </para>
+ </chapter>
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ List of internal functions
+ </para>
+ <!-- Do NOT change the chapter id or title! -->
+ <!-- TODO: Replace with link to appropriate headerfile(s),
+ source file(s), or both. One per row, ensure the
+ exclamation mark is on the first column! If no
+ appropriate header or source file exist describing a public interface,
+ replace the inclusion with a paragraph containing the text
+ "Not Applicable"-->
+ <section id="ux500_dma.c">
+ <title>ux500_dma.c</title>
+!Idrivers/usb/musb/ux500_dma.c
+ </section>
+ <section id="ux500.c">
+ <title>ux500.c</title>
+!Idrivers/usb/musb/ux500.c
+ </section>
+</chapter>
+</book>
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 54078ed96b3..af76fdef604 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -223,6 +223,7 @@ Code Seq#(hex) Include File Comments
'j' 00-3F linux/joystick.h
'k' 00-0F linux/spi/spidev.h conflict!
'k' 00-05 video/kyro.h conflict!
+'k' 10-17 linux/hsi/hsi_char.h HSI character device
'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system
<http://web.archive.org/web/*/http://mikonos.dia.unisa.it/tcfs>
'l' 40-7F linux/udf_fs_i.h in development:
diff --git a/Documentation/trace/stm-trace.txt b/Documentation/trace/stm-trace.txt
new file mode 100644
index 00000000000..cd73c2b87b7
--- /dev/null
+++ b/Documentation/trace/stm-trace.txt
@@ -0,0 +1,193 @@
+ MIPI System Trace Module driver
+ ===============================
+
+Copyright (C) ST-Ericsson SA 2011
+ Authors: Pierre Peiffer <pierre dot peiffer at stericsson dot com>
+ Philippe Langlais <philippe dot langlais at linaro dot org>
+ License: The GNU Free Documentation License, Version 1.2
+ (dual licensed under the GPL v2)
+
+Hardware overview
+=================
+ This hardware collects and provides simple tracepoints:
+ a system processor (in our case the main ARM CPU,
+ or some small CPUs and DSPs) writes some data,
+ up to 8 bytes, into a register and out comes a log entry
+ with a time stamp (20 ns resolution) on one of 256 channels.
+ Hardware tracepoints are also supported.
+
+ This module's external interface is a pad on the chip
+ that complies with the MIPI System Trace Protocol v1.0
+ (see http://www.mipi.org/specifications/debug).
+ The actual trace output can only be read by an
+ electronic probe, not by software, so it cannot be intercepted by
+ the CPU or reach Linux userspace.
+
+ Bandwidth depends on the number of lines and the bus frequency (for example,
+ on the ux500 SoC: 4 lines at max 100 MHz, i.e. max 400 Mbit/s shared between 7 cores).
+ Transmit FIFO size: 256 samples of up to 8 bytes.
+ On the ux500 platform there are 2 contiguous STM blocks (i.e. 512 channels).
+
+Software Overview
+=================
+ Write atomicity and write order on STM trace channels are ensured by
+ allocating one channel per execution thread (no concurrent access).
+ There are two modes: a lossless but intrusive one (aka Software mode) and
+ a less intrusive but lossy one (aka Hardware mode). By default
+ all sources are configured in Hardware mode and enabled.
+ The end of a data packet is marked by a time stamp on the latest byte(s) only.
+
+Kernel API
+----------
+ Configuration functions:
+ output trace clock frequency, trace mode, output port configuration
+ and enabling/disabling of STM trace sources.
+ A debugfs interface is also exposed for STM trace control.
+
+ Alloc/free STM trace channel functions
+
+ Set of low level atomic trace functions for 1, 2, 4 or 8 bytes,
+ with and without time stamp
+
+ Higher level lockless trace functions:
+ stm_trace_buffer:
+    allocate a channel among the 128 highest channels available,
+    output the trace buffer with arbitrary length
+    (latest byte(s) automatically time stamped), then free the channel
+ stm_trace_buffer_onchannel:
+    use the given channel to output the trace buffer
+    with arbitrary length (latest byte(s) automatically time stamped)
+
+ File I/O console-like output interface (open, close, write)
+
+ See <trace/stm.h> & drivers/misc/stm.c for more detail
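+
+ A minimal kernel-side sketch (hypothetical caller; the exact
+ stm_trace_buffer prototype is an assumption here, check <trace/stm.h>
+ and drivers/misc/stm.c for the real signature):
+
+ #include <linux/kernel.h>
+ #include <trace/stm.h>
+
+ static void my_driver_trace(int value)
+ {
+     char buf[32];
+     int len;
+
+     len = snprintf(buf, sizeof(buf), "my_driver value=%d\n", value);
+
+     /* Assumed to allocate one of the 128 highest channels, output the
+      * buffer (latest byte(s) time stamped), then free the channel.
+      */
+     stm_trace_buffer(buf, len);
+ }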
+
+debugfs API
+-----------
+clockdiv:
+ This is used to set or display the current clock divisor
+ that is configured
+
+connection:
+ This is used to set or display the current output connector
+ that is configured (common values: 0 for not connected, 1 for the default
+ connection, 3 on Ux500 for the APE MIPI34 connection)
+
+free_channels:
+ This is used to display the total number of free channels
+
+masters_enable:
+ This sets or displays whether the STM trace sources
+ are activated. Each bit represents the state of the corresponding source:
+ 0 to disable it, 1 to enable it.
+
+masters_modes:
+ This sets or displays the STM trace source modes.
+ Each bit represents the mode of the corresponding source:
+ 0 for Software (lossless) mode or 1 for Hardware (lossy) mode.
+
+User API
+--------
+ IOCTLs or debugfs for controls
+ Two API levels for tracing:
+ - Standard write function: in this case a channel is automatically
+   allocated at the first write; afterwards you can get the channel number
+   with the IOCTL STM_GET_CHANNEL_NO
+ - mmap for direct access to all STM trace channel ports, plus
+   a set of IOCTLs to alloc/free channels; in this case you can
+   write your own lib to ease its usage
+
+Examples of using the STM
+=========================
+First mount debugfs with:
+mount -t debugfs none /sys/kernel/debug
+
+In a shell script
+----------------
+It's as easy as:
+ echo "My trace point" > /dev/stm
+
+To avoid trace overflow, increase the STM clock by decreasing the clockdiv with:
+ echo 1 >/sys/kernel/debug/stm/clockdiv # now use DIV2 instead of default DIV8
+If that is not enough you can disable some sources with:
+ echo YourEnableSources > /sys/kernel/debug/stm/masters_enable
+If that is still not enough, the ultimate (intrusive) way is to change the
+source modes and put the corresponding sources in Software mode (set the
+corresponding source bit to 0) with:
+ echo YourModeSources > /sys/kernel/debug/stm/masters_modes
+ (be aware some sources don't support Software mode => keep them in HW mode)
+
+NB: on the Ux500 platform, you first have to configure the STM output port to
+switch APE tracing to the MIPI34 connector with:
+ echo 3 > /sys/kernel/debug/stm/connection
+
+In C language
+-------------
+
+The easy but more intrusive way (with an STM buffer copy):
+
+#include <trace/stm.h>
+
+int fd, i;
+char buf[1024]; // Try to align this buffer on 64 bits if possible
+
+ fd = open("/dev/stm", O_WRONLY);
+ snprintf(buf, 1024, "STM0 Hello world\n");
+ write(fd, buf, strlen(buf));
+ ioctl(fd, STM_GET_CHANNEL_NO, &i);
+ snprintf(buf, 1024, "Use channel #%d\n", i);
+ write(fd, buf, strlen(buf));
+ close(fd);
+
+NB: You can call open("/dev/stm", O_WRONLY) as many times as necessary
+to allocate different channels and avoid concurrent writes in your
+multithreaded application.
+
+The more efficient way uses the mmap'ed STM channel memory (suitable for wrapping in a lib):
+
+#include <trace/stm.h>
+
+int fd, i, c, l, maxChannels;
+char buf[1024]; // Try to align this buffer on 64 bits if possible
+volatile struct stm_channel *channels; // mmap'ed channels area
+
+ fd = open("/dev/stm", O_RDWR);
+ ioctl(fd, STM_GET_NB_MAX_CHANNELS, &maxChannels);
+ channels = (struct stm_channel *)mmap(0, maxChannels*sizeof(*channels),
+ PROT_WRITE, MAP_SHARED, fd, 0);
+ assert(channels != MAP_FAILED);
+
+ if (!ioctl(fd, STM_GET_FREE_CHANNEL, &c)) {
+ l = snprintf(buf, 1024, "STM0 Hello world on channel #%d\n", c);
+ // Lazy implementation: send the buffer 8 bytes at a time when possible,
+ // and make sure you don't share this channel with other threads.
+ for (i=0; i<l; i++) {
+ channels[c].stamp8 = buf[i];
+ }
+ ioctl(fd, STM_RELEASE_CHANNEL, c);
+ }
+ munmap((void *)channels, maxChannels*sizeof(*channels));
+ close(fd);
+
+Kernel Internal usages
+======================
+Channels dedicated to the kernel are allocated dynamically
+among the 128 highest ones.
+
+Via menuconfig you can:
+- Duplicate printk output on a STM dedicated channel (255)
+- Have realtime ftrace output to a STM dedicated channel (254),
+ if corresponding TRACER is enabled
+- Have realtime sched context switch & sched wakeup output on dedicated channels
+ (253, 252), if corresponding TRACER is enabled
+- Have Stack Trace on dedicated channels (251)
+- Duplicate trace_printk output on dedicated channels (250 & 249)
+
+
+And in the future:
+------------------
+- Use it in the standard kernel tracing infrastructure,
+ possibilities:
+ - Insert other STM trace calls before trace ring buffer write
+ - Substitute time stamping & trace ring buffer by STM
+
diff --git a/Makefile b/Makefile
index adddd11c3b3..2120c7ca0ae 100644
--- a/Makefile
+++ b/Makefile
@@ -443,7 +443,7 @@ asm-generic:
no-dot-config-targets := clean mrproper distclean \
cscope gtags TAGS tags help %docs check% coccicheck \
include/linux/version.h headers_% \
- kernelversion %src-pkg
+ kernelrelease kernelversion %src-pkg
config-targets := 0
mixed-targets := 0
@@ -947,7 +947,7 @@ $(vmlinux-dirs): prepare scripts
# Store (new) KERNELRELASE string in include/config/kernel.release
include/config/kernel.release: include/config/auto.conf FORCE
$(Q)rm -f $@
- $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" > $@
+ $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion -s $(srctree) -t v$(KERNELVERSION))" > $@
# Things we need to do before we recursively start building the kernel
@@ -1460,7 +1460,7 @@ checkstack:
$(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
kernelrelease:
- @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+ @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion -s $(srctree) -t v$(KERNELVERSION))"
kernelversion:
@echo $(KERNELVERSION)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b259c7c644e..557078c2ec4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -72,6 +72,10 @@ config KTIME_SCALAR
bool
default y
+config KTIME_SCALAR
+ bool
+ default y
+
config HAVE_TCM
bool
select GENERIC_ALLOCATOR
@@ -904,7 +908,9 @@ config ARCH_U8500
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
+ select HAVE_CLK
select ARCH_HAS_CPUFREQ
+ select NOMADIK_GPIO
help
Support for ST-Ericsson's Ux500 architecture
@@ -1699,7 +1705,9 @@ source "mm/Kconfig"
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ARCH_SHMOBILE
range 11 64 if ARCH_SHMOBILE
+ depends on SA1111 || UX500_SOC_DB8500
default "9" if SA1111
+ default "12" if UX500_SOC_DB8500
default "11"
help
The kernel memory allocator divides physically contiguous memory
@@ -2014,6 +2022,13 @@ config KEXEC
initially work for you. It may help to enable device hotplugging
support.
+config CRASH_SWRESET
+ bool "Perform a software reset at a panic (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on KEXEC
+ help
+ If no crash kernel has been loaded, perform a SW reset as plan B.
+
config ATAGS_PROC
bool "Export atags in procfs"
depends on KEXEC
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index c5213e78606..6c44e08b6ef 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -257,6 +257,15 @@ config EARLY_PRINTK
kernel low-level debugging functions. Add earlyprintk to your
kernel parameters to enable this console.
+config PRINTK_LL
+ bool "Use printascii in printk"
+ depends on DEBUG_LL
+ help
+ Say Y here if you want to have printk send its output via the
+ kernel low-level debugging functions. This is useful if you
+ are debugging code that executes before the earlyprintk console
+ is initialized.
+
config OC_ETM
bool "On-chip ETM and ETB"
depends on ARM_AMBA
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ea9b6f3607..86d67017c1e 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_BOOTTIME) += boottime.o
diff --git a/arch/arm/common/boottime.c b/arch/arm/common/boottime.c
new file mode 100644
index 00000000000..73e9e04ed37
--- /dev/null
+++ b/arch/arm/common/boottime.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009-2010
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Store boot times measured during for example u-boot startup.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/boottime.h>
+#include <linux/string.h>
+#include <asm/setup.h>
+
+static u32 bootloader_idle;
+static u32 bootloader_total;
+
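+/*
+ * Parse the ATAG_BOOTTIME tag passed by the boot loader: store the
+ * bootloader idle and total times and report each recorded entry via
+ * boottime_mark_wtime().
+ */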
+static int __init boottime_parse_tag(const struct tag *tag)
+{
+ int i;
+ char buff[BOOTTIME_MAX_NAME_LEN];
+
+ bootloader_idle = tag->u.boottime.idle;
+ bootloader_total = tag->u.boottime.total;
+
+ for (i = 0; i < tag->u.boottime.num; i++) {
+ snprintf(buff, BOOTTIME_MAX_NAME_LEN, "%s+0x0/0x0",
+ tag->u.boottime.entry[i].name);
+ buff[BOOTTIME_MAX_NAME_LEN - 1] = '\0';
+ boottime_mark_wtime(buff, tag->u.boottime.entry[i].time);
+ }
+
+ return 0;
+}
+
+__tagtable(ATAG_BOOTTIME, boottime_parse_tag);
+
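+/* Return the bootloader idle time as a percentage of its total boot time. */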
+int boottime_bootloader_idle(void)
+{
+ if (bootloader_total == 0)
+ return 0;
+
+ return (int) ((bootloader_idle) / (bootloader_total / 100));
+}
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d7b6e7b727..491b7b932cc 100644..100755
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -1,117 +1,333 @@
CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_BOOTTIME=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_DEFAULT_DEADLINE=y
CONFIG_ARCH_U8500=y
-CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
CONFIG_MACH_HREFV60=y
CONFIG_MACH_SNOWBALL=y
-CONFIG_MACH_U5500=y
+CONFIG_DBX500_PRCMU_DEBUG=y
+CONFIG_DB8500_MLOADER=y
+CONFIG_MCDE_DISPLAY_PRIMARY_32BPP=y
+CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC=y
+CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY=y
+CONFIG_DISPLAY_AV8100_TERTIARY=y
+CONFIG_DISPLAY_AV8100_TRIPPLE_BUFFER=y
+CONFIG_UX500_SUSPEND=y
+CONFIG_UX500_SUSPEND_STANDBY=y
+CONFIG_UX500_SUSPEND_MEM=y
+CONFIG_UX500_SUSPEND_DBG=y
+CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
+CONFIG_HIGHMEM=y
+CONFIG_CMDLINE="root=/dev/ram0 init=init rw console=ttyAMA2,115200n8 mem=256M initrd=0x800000,72M"
+CONFIG_KEXEC=y
+CONFIG_CRASH_SWRESET=y
+CONFIG_CRASH_DUMP=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_U8500_CPUIDLE_DEEPEST_STATE=2
+CONFIG_UX500_CPUIDLE_DEBUG=y
+CONFIG_FPE_NWFPE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_IPV6_SIT is not set
CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_PHONET=y
-# CONFIG_WIRELESS is not set
+CONFIG_NET_SCHED=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
CONFIG_CAIF=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_BLK_DEV_RAM_SIZE=73728
CONFIG_MISC_DEVICES=y
CONFIG_AB8500_PWM=y
CONFIG_SENSORS_BH1780=y
+CONFIG_STE_TRACE_MODEM=y
+CONFIG_DISPDEV=y
+CONFIG_U8500_SIM_DETECT=y
+CONFIG_STM_TRACE=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
CONFIG_NETDEVICES=y
-CONFIG_SMSC911X=y
+CONFIG_TUN=y
CONFIG_SMSC_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_HSI=m
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_MPPE=y
+# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_NOMADIK=y
+CONFIG_KEYBOARD_NOMADIK_SKE=y
CONFIG_KEYBOARD_STMPE=y
CONFIG_KEYBOARD_TC3589X=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_BU21013=y
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=y
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_AB8500_ACCDET=y
CONFIG_INPUT_AB8500_PONKEY=y
-# CONFIG_SERIO is not set
+CONFIG_INPUT_UINPUT=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_NOMADIK=y
-CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_NOMADIK=y
CONFIG_SPI=y
+# CONFIG_STM_MSP_SPI is not set
CONFIG_SPI_PL022=y
-CONFIG_GPIO_STMPE=y
+CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_TC3589X=y
+CONFIG_GPIO_AB8500=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_AB8500_BM=y
+CONFIG_SENSORS_AB8500=y
+CONFIG_SENSORS_LSM303DLH=y
+CONFIG_SENSORS_L3G4200D=y
+CONFIG_WATCHDOG=y
+CONFIG_U8500_WATCHDOG_DEBUG=y
+CONFIG_TPS6105X=y
CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
CONFIG_AB5500_CORE=y
CONFIG_AB8500_CORE=y
+CONFIG_MFD_DB8500_PRCMU=y
+CONFIG_REGULATOR_DEBUG=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
CONFIG_REGULATOR_AB8500=y
-# CONFIG_HID_SUPPORT is not set
+CONFIG_REGULATOR_DB8500_PRCMU=y
+CONFIG_REGULATOR_AB8500_DEBUG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+CONFIG_RADIO_CG2900=y
+CONFIG_DRM=y
+CONFIG_GPU_MALI=y
+CONFIG_FB=y
+CONFIG_FB_MCDE=y
+CONFIG_MCDE_FB_AVOID_REALLOC=y
+CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0=y
+# CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE is not set
+CONFIG_AV8100_HWTRIG_I2SDAT3=y
+CONFIG_FB_B2R2=y
+CONFIG_B2R2_PLUG_CONF=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_UX500=y
+CONFIG_SND_SOC_UX500_AB5500=y
+CONFIG_SND_SOC_UX500_AB8500=y
+CONFIG_SND_SOC_UX500_CG29XX=y
+CONFIG_SND_SOC_UX500_AV8100=y
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_MON=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_UX500=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_MUSB_HDRC=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_USB_G_MULTI=m
+# CONFIG_USB_G_MULTI_RNDIS is not set
+CONFIG_USB_G_HID=m
CONFIG_AB8500_USB=y
CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
CONFIG_MMC_CLKGATE=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LM3530=y
CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AB=y
CONFIG_RTC_DRV_AB8500=y
-CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_STAGING=y
+CONFIG_AB5500_SIM=y
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
CONFIG_HSEM_U8500=y
+CONFIG_CG2900=y
+CONFIG_CG2900_CHIP=y
+CONFIG_STLC2690_CHIP=y
+CONFIG_CG2900_UART=y
+CONFIG_CG2900_AUDIO=y
+CONFIG_CG2900_TEST=y
+CONFIG_BT_CG2900=y
+CONFIG_CW1200=m
+CONFIG_CW1200_USE_GPIO_IRQ=y
+CONFIG_CW1200_DEBUGFS=y
+CONFIG_U8500_MMIO=y
+CONFIG_U8500_CM=y
+CONFIG_U8500_FLASH=y
+CONFIG_MODEM=y
+CONFIG_MODEM_U8500=y
+CONFIG_U8500_SHRM=y
+CONFIG_U8500_SHRM_MODEM_SILENT_RESET=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CONFIGFS_FS=m
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
+CONFIG_HFS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_9P_FS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BLKDEV_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_FTRACE is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_UX500=y
+CONFIG_CRYPTO_DEV_UX500_HASH=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c7268..a4bf3199819 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -84,6 +84,14 @@
* - kaddr - page address
* - size - region size
*
+ * clean_dcache_all()
+ *
+ * Cleans the entire d-cache.
+ *
+ * flush_dcache_all()
+ *
+ * Flushes the entire d-cache.
+ *
* DMA Cache Coherency
* ===================
*
@@ -104,6 +112,9 @@ struct cpu_cache_fns {
void (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
+ void (*clean_dcache_all)(void);
+ void (*flush_dcache_all)(void);
+
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
@@ -124,6 +135,8 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
+#define __cpuc_clean_dcache_all cpu_cache.clean_dcache_all
+#define __cpuc_flush_dcache_all cpu_cache.flush_dcache_all
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -144,6 +157,8 @@ extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
+extern void __cpuc_clean_dcache_all(void);
+extern void __cpuc_flush_dcache_all(void);
/*
* These are private to the dma-mapping API. Do not use directly.
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda18154..91063a3976f 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -8,7 +8,7 @@
#include <asm/param.h> /* HZ */
-extern void __delay(int loops);
+extern void __delay(unsigned long loops);
/*
* This function intentionally does not exist; if you see references to
@@ -40,5 +40,14 @@ extern void __const_udelay(unsigned long);
__const_udelay((n) * ((2199023U*HZ)>>11))) : \
__udelay(n))
+extern void (*delay_fn)(unsigned long);
+
+static inline void set_delay_fn(void (*fn)(unsigned long))
+{
+ delay_fn = fn;
+}
+
+extern void read_current_timer_delay_loop(unsigned long loops);
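+
+/*
+ * A platform that provides read_current_timer() can, for example, switch
+ * the delay implementation at init time (illustrative, not mandated here):
+ *
+ *	set_delay_fn(read_current_timer_delay_loop);
+ */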
+
#endif /* defined(_ARM_DELAY_H) */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686..85cf3655914 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -96,8 +96,8 @@ struct elf32_hdr;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-extern int elf_check_arch(const struct elf32_hdr *);
-#define elf_check_arch elf_check_arch
+extern int arm_elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch(x) arm_elf_check_arch((const struct elf32_hdr *)(x))
#define vmcore_elf64_check_arch(x) (0)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 065d100fa63..0fff50f73b8 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -63,6 +63,12 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_WC 3
/*
+ * NOTE : U8500 v1.0/ED cut specific hack.
+ * look at the commit message for more details
+ */
+#define MT_BACKUP_RAM 4
+
+/*
* types 4 onwards can be found in asm/mach/map.h and are undefined
* for ioremap
*/
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352..e76b9eae8e4 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -33,6 +33,8 @@ struct outer_cache_fns {
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
+ void (*prefetch_enable)(void);
+ void (*prefetch_disable)(void);
void (*set_debug)(unsigned long);
void (*resume)(void);
};
@@ -81,6 +83,18 @@ static inline void outer_resume(void)
outer_cache.resume();
}
+static inline void outer_prefetch_enable(void)
+{
+ if (outer_cache.prefetch_enable)
+ outer_cache.prefetch_enable();
+}
+
+static inline void outer_prefetch_disable(void)
+{
+ if (outer_cache.prefetch_disable)
+ outer_cache.prefetch_disable();
+}
+
#else
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 915696dd9c7..ad937bc348f 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -143,6 +143,23 @@ struct tag_memclk {
__u32 fmemclk;
};
+/* for automatic boot timing testcases */
+#define ATAG_BOOTTIME 0x41000403
+#define BOOTTIME_MAX_NAME_LEN 64
+#define BOOTTIME_MAX 10
+
+struct boottime_entry {
+ u32 time; /* in us */
+ u8 name[BOOTTIME_MAX_NAME_LEN];
+};
+
+struct tag_boottime {
+ struct boottime_entry entry[BOOTTIME_MAX];
+ u32 idle; /* in us */
+ u32 total; /* in us */
+ u8 num;
+};
+
struct tag {
struct tag_header hdr;
union {
@@ -165,6 +182,10 @@ struct tag {
* DC21285 specific
*/
struct tag_memclk memclk;
+ /*
+ * Boot time
+ */
+ struct tag_boottime boottime;
} u;
};
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index ef9ffba97ad..7ea2460d9b6 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -25,4 +25,12 @@ extern void __iomem *twd_base;
void twd_timer_setup(struct clock_event_device *);
void twd_timer_stop(struct clock_event_device *);
+#if defined(CONFIG_HOTPLUG) || defined(CONFIG_CPU_IDLE)
+void twd_save(void);
+void twd_restore(void);
+#else
+static inline void twd_save(void) { }
+static inline void twd_restore(void) { }
+#endif
+
#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 984014b9264..411a02fa4c9 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -103,6 +103,8 @@ extern void cpu_init(void);
void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);
+void cpu_idle_wait(void);
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5b0bce61eb6..f0c41295216 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,10 +49,6 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
- /* platform dependent support */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__const_udelay);
-
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index ddba41d1fcf..cac241a8415 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -4,11 +4,13 @@
#include <linux/binfmts.h>
#include <linux/elf.h>
-int elf_check_arch(const struct elf32_hdr *x)
+int arm_elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
/* Make sure it's an ARM executable */
+ if (x->e_ident[EI_CLASS] != ELF_CLASS)
+ return 0;
if (x->e_machine != EM_ARM)
return 0;
@@ -35,7 +37,7 @@ int elf_check_arch(const struct elf32_hdr *x)
}
return 1;
}
-EXPORT_SYMBOL(elf_check_arch);
+EXPORT_SYMBOL(arm_elf_check_arch);
void elf_set_personality(const struct elf32_hdr *x)
{
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd496c3..2a9c3430115 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -47,6 +47,7 @@ void machine_crash_nonpanic_core(void *unused)
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
crash_save_cpu(&regs, smp_processor_id());
+ atomic_notifier_call_chain(&crash_percpu_notifier_list, 0, NULL);
flush_cache_all();
atomic_dec(&waiting_for_crash_ipi);
@@ -122,3 +123,13 @@ void machine_kexec(struct kimage *image)
flush_cache_all();
cpu_reset(reboot_code_buffer_phys);
}
+
+void machine_crash_swreset(void)
+{
+ printk(KERN_INFO "Software reset on panic!\n");
+
+ flush_cache_all();
+ outer_flush_all();
+ outer_disable();
+ arm_pm_restart(0, NULL);
+}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3d0c6fb74ae..caead1019e2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -187,8 +187,17 @@ void cpu_idle(void)
leds_event(led_idle_start);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id()))
+ if (cpu_is_offline(smp_processor_id())) {
+
+			/* NOTE: preempt_count() should be 0 for the dying CPU,
+			 * as the CPU will reuse this very thread when it
+			 * comes back online.
+			 */
+ if (preempt_count())
+ preempt_enable_no_resched();
+
cpu_die();
+ }
#endif
local_irq_disable();
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555d..0697db65efa 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -58,10 +58,6 @@ void *return_address(unsigned int level)
#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-#if defined(CONFIG_ARM_UNWIND)
-#warning "TODO: return_address should use unwind tables"
-#endif
-
void *return_address(unsigned int level)
{
return NULL;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ef5640b9e21..ddc7fd3db9c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -321,8 +321,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
notify_cpu_starting(cpu);
- calibrate_delay();
-
smp_store_cpu_info(cpu);
/*
@@ -477,7 +475,7 @@ static void ipi_timer(void)
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void smp_timer_broadcast(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_TIMER);
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a8a6682d6b5..be6af1dc4fa 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,13 +10,17 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/percpu.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
@@ -25,7 +29,12 @@
/* set up by the platform code */
void __iomem *twd_base;
+static struct clk *twd_clk;
static unsigned long twd_timer_rate;
+static DEFINE_PER_CPU(struct clock_event_device *, twd_ce);
+
+static DEFINE_PER_CPU(u32, twd_ctrl);
+static DEFINE_PER_CPU(u32, twd_load);
static struct clock_event_device __percpu **twd_evt;
@@ -89,6 +98,52 @@ void twd_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(clk->irq);
}
+#ifdef CONFIG_CPU_FREQ
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+ twd_timer_rate = clk_get_rate(twd_clk);
+
+ clockevents_update_freq(__get_cpu_var(twd_ce), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+ unsigned long state, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+
+ /*
+ * The twd clock events must be reprogrammed to account for the new
+ * frequency. The timer is local to a cpu, so cross-call to the
+ * changing cpu.
+ */
+ if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+ smp_call_function_single(freqs->cpu, twd_update_frequency,
+ NULL, 1);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+ .notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+ if (!IS_ERR_OR_NULL(twd_clk))
+ return cpufreq_register_notifier(&twd_cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return 0;
+}
+core_initcall(twd_cpufreq_init);
+
+#endif
+
static void __cpuinit twd_calibrate_rate(void)
{
unsigned long count;
@@ -140,6 +195,27 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+static struct clk *twd_get_clock(void)
+{
+ struct clk *clk;
+ int err;
+
+ clk = clk_get_sys("smp_twd", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
+ return clk;
+ }
+
+ err = clk_enable(clk);
+ if (err) {
+ pr_err("smp_twd: clock failed to enable: %d\n", err);
+ clk_put(clk);
+ return ERR_PTR(err);
+ }
+
+ return clk;
+}
+
/*
* Setup the local clock events for a CPU.
*/
@@ -165,7 +241,13 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
}
}
- twd_calibrate_rate();
+ if (!twd_clk)
+ twd_clk = twd_get_clock();
+
+ if (!IS_ERR_OR_NULL(twd_clk))
+ twd_timer_rate = clk_get_rate(twd_clk);
+ else
+ twd_calibrate_rate();
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -173,15 +255,35 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clk->rating = 350;
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
- clk->shift = 20;
- clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
- clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
- clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
this_cpu_clk = __this_cpu_ptr(twd_evt);
*this_cpu_clk = clk;
- clockevents_register_device(clk);
+ __get_cpu_var(twd_ce) = clk;
+
+ clockevents_config_and_register(clk, twd_timer_rate,
+ 0xf, 0xffffffff);
enable_percpu_irq(clk->irq, 0);
}
+
+#if defined(CONFIG_HOTPLUG) || defined(CONFIG_CPU_IDLE)
+void twd_save(void)
+{
+ int this_cpu = smp_processor_id();
+
+ per_cpu(twd_ctrl, this_cpu) = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ per_cpu(twd_load, this_cpu) = __raw_readl(twd_base + TWD_TIMER_LOAD);
+
+}
+
+void twd_restore(void)
+{
+ int this_cpu = smp_processor_id();
+
+ __raw_writel(per_cpu(twd_ctrl, this_cpu),
+ twd_base + TWD_TIMER_CONTROL);
+ __raw_writel(per_cpu(twd_load, this_cpu),
+ twd_base + TWD_TIMER_LOAD);
+}
+#endif
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
deleted file mode 100644
index 3c9a05c8d20..00000000000
--- a/arch/arm/lib/delay.S
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * linux/arch/arm/lib/delay.S
- *
- * Copyright (C) 1995, 1996 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/param.h>
- .text
-
-.LC0: .word loops_per_jiffy
-.LC1: .word (2199023*HZ)>>11
-
-/*
- * r0 <= 2000
- * lpj <= 0x01ffffff (max. 3355 bogomips)
- * HZ <= 1000
- */
-
-ENTRY(__udelay)
- ldr r2, .LC1
- mul r0, r2, r0
-ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
- mov r1, #-1
- ldr r2, .LC0
- ldr r2, [r2] @ max = 0x01ffffff
- add r0, r0, r1, lsr #32-14
- mov r0, r0, lsr #14 @ max = 0x0001ffff
- add r2, r2, r1, lsr #32-10
- mov r2, r2, lsr #10 @ max = 0x00007fff
- mul r0, r2, r0 @ max = 2^32-1
- add r0, r0, r1, lsr #32-6
- movs r0, r0, lsr #6
- moveq pc, lr
-
-/*
- * loops = r0 * HZ * loops_per_jiffy / 1000000
- *
- * Oh, if only we had a cycle counter...
- */
-
-@ Delay routine
-ENTRY(__delay)
- subs r0, r0, #1
-#if 0
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
-#endif
- bhi __delay
- mov pc, lr
-ENDPROC(__udelay)
-ENDPROC(__const_udelay)
-ENDPROC(__delay)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
new file mode 100644
index 00000000000..b8d636e8ef8
--- /dev/null
+++ b/arch/arm/lib/delay.c
@@ -0,0 +1,81 @@
+/*
+ * Originally from linux/arch/arm/lib/delay.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+
+/*
+ * Oh, if only we had a cycle counter...
+ */
+static void delay_loop(unsigned long loops)
+{
+ asm volatile(
+ "1: subs %0, %0, #1 \n"
+ " bhi 1b \n"
+ : /* No output */
+ : "r" (loops)
+ );
+}
+
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+/*
+ * Assumes read_current_timer() is monotonically increasing
+ * across calls and wraps at most once within MAX_UDELAY_MS.
+ */
+void read_current_timer_delay_loop(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ read_current_timer(&bclock);
+ do {
+ read_current_timer(&now);
+ } while ((now - bclock) < loops);
+}
+#endif
+
+void (*delay_fn)(unsigned long) = delay_loop;
+
+/*
+ * loops = usecs * HZ * loops_per_jiffy / 1000000
+ */
+void __delay(unsigned long loops)
+{
+ delay_fn(loops);
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * 0 <= xloops <= 0x7fffff06
+ * loops_per_jiffy <= 0x01ffffff (max. 3355 bogomips)
+ */
+void __const_udelay(unsigned long xloops)
+{
+ unsigned long lpj;
+ unsigned long loops;
+
+ xloops >>= 14; /* max = 0x01ffffff */
+ lpj = loops_per_jiffy >> 10; /* max = 0x0001ffff */
+ loops = lpj * xloops; /* max = 0x00007fff */
+ loops >>= 6; /* max = 2^32-1 */
+
+ if (likely(loops))
+ __delay(loops);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+/*
+ * usecs <= 2000
+ * HZ <= 1000
+ */
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * ((2199023UL*HZ)>>11));
+}
+EXPORT_SYMBOL(__udelay);
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index a3e0c8692f0..21018ecfd67 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -4,20 +4,25 @@ config UX500_SOC_COMMON
bool
default y
select ARM_GIC
- select HAS_MTU
+ select NOMADIK_GPIO
select ARM_ERRATA_753970
select ARM_ERRATA_754322
+ select SYS_SOC
+ select HAS_MTU
+
+config UX500_SOC_DBX500
+ depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
+ bool
menu "Ux500 SoC"
config UX500_SOC_DB5500
bool "DB5500"
- select MFD_DB5500_PRCMU
+ select UX500_SOC_DBX500
config UX500_SOC_DB8500
bool "DB8500"
- select MFD_DB8500_PRCMU
- select REGULATOR_DB8500_PRCMU
+ select UX500_SOC_DBX500
endmenu
@@ -26,13 +31,13 @@ menu "Ux500 target platform (boards)"
config MACH_U8500
bool "U8500 Development platform"
depends on UX500_SOC_DB8500
- select TPS6105X
help
Include support for the mop500 development platform.
config MACH_HREFV60
bool "U85000 Development platform, HREFv60 version"
depends on UX500_SOC_DB8500
+ select MACH_U8500
help
Include support for the HREFv60 new development platform.
@@ -50,6 +55,24 @@ config MACH_U5500
Include support for the U5500 development platform.
endmenu
+choice
+ prompt "Ux500 UIB Keylayout"
+ default KEYLAYOUT_LAYOUT1
+
+config KEYLAYOUT_LAYOUT1
+ bool "UIB Keylayout 1; for generic users"
+ help
+ Supported keylayout for some numerics, power/call buttons,
+ volume control etc
+
+config KEYLAYOUT_LAYOUT2
+ bool "UIB Keylayout 2; for connectivity users"
+ help
+ Supports keylayout numerics 0-9, left/right/up/down/back/
+ enter keys and special character "."(dot)
+
+endchoice
+
config UX500_DEBUG_UART
int "Ux500 UART to use for low-level debug"
default 2
@@ -57,6 +80,13 @@ config UX500_DEBUG_UART
Choose the UART on which kernel low-level debug messages should be
output.
+config SENSORS1P_MOP
+ tristate "HAL and Proximity sensors support"
+ depends on REGULATOR && (GPIO_STMPE2401 || GPIO_TC35892)
+ help
+	  Add support for Osram's SFH7741 Proximity Sensor and Samsung's
+	  HED54XXU11 HAL Switch
+
config U5500_MODEM_IRQ
bool "Modem IRQ support"
depends on UX500_SOC_DB5500
@@ -64,11 +94,50 @@ config U5500_MODEM_IRQ
help
Add support for handling IRQ:s from modem side
-config U5500_MBOX
- bool "Mailbox support"
- depends on U5500_MODEM_IRQ
+config DBX500_PRCMU_DEBUG
+ bool "DBX500 PRCMU debug"
+ depends on ((MFD_DB5500_PRCMU || MFD_DB8500_PRCMU) && DEBUG_FS)
+ help
+ Add support for PRCMU debug
+
+config TEE_UX500
+ bool "Trusted Execution Environment (TEE) ux500 hardware support"
+ depends on TEE_SUPPORT
+ default y
+ help
+ Adds TEE hardware support for ux500 platforms.
+
+config TEE_SVP
+ bool "Trusted Execution Environment (TEE) ux500 SVP support"
+ depends on TEE_SUPPORT && UX500_SVP
default y
help
- Add support for U5500 mailbox communication with modem side
+ Adds TEE support for SVP in ux500 platforms.
+
+config UX500_DEBUG_HWREG
+ bool "Debug hardware registers from userspace"
+ depends on (DEBUG_FS && UX500_SOC_DB8500)
+ help
+ Adds various debug files to access registers.
+ This should never ever be used for anything other than debugging.
+
+config UX500_DEBUG_NO_LAUTERBACH
+ bool "Disable clocks needed for Lauterbach debugging"
+ help
+ Disable clocks needed for Lauterbach debugging at boot.
+ Saying yes here reduces power consumption.
+
+config UX500_L2X0_PREFETCH_CTRL
+ bool "PL310 prefetch control"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && \
+ (TEE_UX500 && CACHE_L2X0)
+ default y
+ help
+ Adds an interface to control instruction and data prefetch.
+ Communication with TrustZone is done through the TEE driver.
+
+source "arch/arm/mach-ux500/pm/Kconfig"
+
+source "arch/arm/mach-ux500/Kconfig-arch"
endif
diff --git a/arch/arm/mach-ux500/Kconfig-arch b/arch/arm/mach-ux500/Kconfig-arch
new file mode 100644
index 00000000000..d00b176301d
--- /dev/null
+++ b/arch/arm/mach-ux500/Kconfig-arch
@@ -0,0 +1,142 @@
+config U8500_SECURE
+ bool "Support for running in Secure mode"
+ default n
+ help
+ Build the kernel to run in Secure mode.
+
+#Configuration for MCDE setup
+
+if FB_MCDE
+
+menu "Display selection"
+
+config DISPLAY_GENERIC_PRIMARY
+ bool "Generic primary display support"
+ depends on (MACH_U8500 || MACH_U5500)
+ select MCDE_DISPLAY_DSI
+ default y
+
+choice
+ prompt "Display port type"
+ depends on DISPLAY_GENERIC_PRIMARY
+ default DISPLAY_GENERIC_DSI_PRIMARY
+ help
+ Select the kind of display port used for the primary display
+
+config DISPLAY_GENERIC_DSI_PRIMARY
+ bool "DSI display"
+ select MCDE_DISPLAY_GENERIC_DSI
+ help
+ Say yes here when using a DSI display
+
+config MCDE_DISPLAY_DPI_PRIMARY
+ bool "DPI display"
+ select MCDE_DISPLAY_DPI
+ depends on MACH_U8500
+ help
+ Say yes here when using a DPI display
+
+endchoice
+
+choice
+ prompt "Color depth"
+ depends on DISPLAY_GENERIC_PRIMARY
+ default MCDE_DISPLAY_PRIMARY_16BPP
+ help
+ Select color depth for primary display
+
+config MCDE_DISPLAY_PRIMARY_16BPP
+ bool "16 bpp"
+ help
+ 16 bpp color depth
+
+config MCDE_DISPLAY_PRIMARY_32BPP
+ bool "32 bpp"
+ help
+ 32 bpp color depth
+
+endchoice
+
+choice DISPLAY_GENERIC_DSI_PRIMARY_ROTATION
+ prompt "Enable main display rotation"
+ depends on DISPLAY_GENERIC_DSI_PRIMARY
+ default DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_90
+ help
+ Set rotation of main display
+
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_0
+ bool "0 degrees"
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_90
+ bool "90 degrees"
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_180
+ bool "180 degrees"
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_270
+ bool "270 degrees"
+endchoice
+
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE
+ int
+ depends on DISPLAY_GENERIC_DSI_PRIMARY
+ default "0" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_0
+ default "90" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_90
+ default "180" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_180
+ default "270" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_270
+
+config DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ bool "Enable v-sync for primary display"
+ depends on DISPLAY_GENERIC_DSI_PRIMARY
+ default n
+ help
+ Say yes to enable v-sync for primary display
+
+config DISPLAY_SONY_ACX424AKP_DSI_PRIMARY
+ bool "Sony acx424akp DSI display"
+ select MCDE_DISPLAY_SONY_ACX424AKP_DSI
+ select MCDE_DISPLAY_DSI
+ help
+ Say yes here when using a Sony ACX424AKP DSI display
+
+config DISPLAY_GENERIC_DSI_SECONDARY
+ bool "Sub display support"
+ depends on MACH_U8500
+ select MCDE_DISPLAY_GENERIC_DSI
+ select MCDE_DISPLAY_DSI
+ help
+ Say yes here if a sub display exists
+
+config DISPLAY_GENERIC_DSI_SECONDARY_VSYNC
+ bool "Enable v-sync for secondary display"
+ depends on DISPLAY_GENERIC_DSI_SECONDARY
+ help
+ Say yes to enable v-sync for secondary display
+
+config DISPLAY_AB8500_TERTIARY
+ bool "AB8500 TVout display support"
+ depends on MACH_U8500 && !AV8100_SDTV
+ select MCDE_DISPLAY_AB8500_DENC
+ help
+ Say yes here if TV-out support is needed
+
+config DISPLAY_AV8100_TERTIARY
+ bool "AV8100 HDMI/CVBS display support"
+ depends on (MACH_U8500 || MACH_U5500)
+ select MCDE_DISPLAY_AV8100
+ select MCDE_DISPLAY_DSI
+ help
+ Say yes here if HDMI output support is needed
+
+config DISPLAY_AV8100_TRIPPLE_BUFFER
+ bool "Enable tripple buffer for HDMI display"
+ depends on DISPLAY_AV8100_TERTIARY
+ help
+ Say yes to enable tripple buffer. You'll get double buffer otherwise
+
+config DISPLAY_FICTIVE
+ bool "DISPLAY fictive"
+ default n
+ ---help---
+ Say Y if you want a fictive display that doesn't access hardware
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 6bd2f451c18..70c483b9dd8 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -1,21 +1,70 @@
#
-# Makefile for the linux kernel, U8500 machine.
+# Makefile for the linux kernel, UX500 machine.
#
-obj-y := clock.o cpu.o devices.o devices-common.o \
- id.o usb.o timer.o
+obj-y := clock.o cpu.o devices.o \
+ devices-common.o id.o pins.o \
+ usb.o reboot_reasons.o timer.o \
+ uart-db8500.o clock-debug.o
+obj-y += pm/
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
-obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o
-obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
+
+
+ifeq ($(CONFIG_UX500_SOC_DB5500), y)
+obj-$(CONFIG_UX500_SOC_DBX500) += cpu-db5500.o dma-db5500.o \
+ devices-db5500.o clock-db5500.o
+endif
+ifeq ($(CONFIG_UX500_SOC_DB8500), y)
+obj-$(CONFIG_UX500_SOC_DBX500) += cpu-db8500.o devices-db8500.o \
+ clock-db8500.o dma-db8500.o
+endif
obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \
board-mop500-regulators.o \
board-mop500-uib.o board-mop500-stuib.o \
- board-mop500-u8500uib.o \
- board-mop500-pins.o
-obj-$(CONFIG_MACH_U5500) += board-u5500.o board-u5500-sdi.o
+ board-mop500-u8500uib.o board-mop500-pins.o \
+ board-mop500-bm.o \
+ board-pins-sleep-force.o
+obj-$(CONFIG_SENSORS1P_MOP) += sensors1p.o
+obj-$(CONFIG_MACH_U5500) += board-u5500.o board-u5500-sdi.o \
+ board-u5500-regulators.o \
+ board-u5500-pins.o
+obj-$(CONFIG_U5500_MMIO) += board-u5500-mmio.o
+obj-$(CONFIG_U8500_MMIO) += board-mop500-mmio.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o
-obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o
+obj-$(CONFIG_TEE_UX500) += tee_ux500.o product.o
+obj-$(CONFIG_TEE_SVP) += tee_service_svp.o
+obj-$(CONFIG_TEE_SVP) += tee_ta_start_modem_svp.o
+obj-$(CONFIG_DB8500_MLOADER) += mloader-db8500.o
+obj-$(CONFIG_U5500_MLOADER) += mloader-db5500.o
+obj-$(CONFIG_UX500_DEBUG_HWREG) += hwreg.o
+obj-$(CONFIG_HWMEM) += hwmem-int.o
+obj-$(CONFIG_UX500_L2X0_PREFETCH_CTRL) += l2x0-prefetch.o
+obj-$(CONFIG_AB5500_BM) += board-u5500-bm.o
+obj-$(CONFIG_DBX500_PRCMU_DEBUG) += prcmu-debug.o
+
+obj-$(CONFIG_HWMEM) += dcache.o
+ifdef CONFIG_STM_TRACE
+obj-$(CONFIG_MACH_U8500) += board-mop500-stm.o
+endif
+ifdef CONFIG_SENSORS_LSM303DLH
+obj-$(CONFIG_MACH_U8500) += board-mop500-sensors.o
+endif
+ifdef CONFIG_FB_MCDE
+obj-$(CONFIG_MACH_U8500) += board-mop500-mcde.o
+obj-$(CONFIG_MACH_U5500) += board-u5500-mcde.o
+endif
+ifdef CONFIG_STM_MSP_I2S
+obj-$(CONFIG_MACH_U8500) += board-mop500-msp.o
+endif
+ifdef CONFIG_CW1200
+obj-$(CONFIG_MACH_U8500) += board-mop500-wlan.o
+obj-$(CONFIG_MACH_U5500) += board-u5500-wlan.o
+endif
+ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+obj-$(CONFIG_MACH_U8500) += board-mop500-cyttsp.o
+obj-$(CONFIG_MACH_U5500) += board-u5500-cyttsp.o
+endif
diff --git a/arch/arm/mach-ux500/board-mop500-bm.c b/arch/arm/mach-ux500/board-mop500-bm.c
new file mode 100644
index 00000000000..91fb887d4f5
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-bm.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U8500 board specific charger and battery initialization parameters.
+ *
+ * Author: Johan Palsson <johan.palsson@stericsson.com> for ST-Ericsson.
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ *
+ */
+
+#include <linux/power_supply.h>
+#include <linux/mfd/ab8500/bm.h>
+#include "board-mop500-bm.h"
+
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+/*
+ * These are the defined batteries that use an NTC and an ID resistor placed
+ * inside the battery pack.
+ * Note that the res_to_temp table must be strictly sorted by falling resistance
+ * values to work.
+ */
+static struct res_to_temp temp_tbl_A[] = {
+ {-5, 53407},
+ { 0, 48594},
+ { 5, 43804},
+ {10, 39188},
+ {15, 34870},
+ {20, 30933},
+ {25, 27422},
+ {30, 24347},
+ {35, 21694},
+ {40, 19431},
+ {45, 17517},
+ {50, 15908},
+ {55, 14561},
+ {60, 13437},
+ {65, 12500},
+};
+static struct res_to_temp temp_tbl_B[] = {
+ {-5, 165418},
+ { 0, 159024},
+ { 5, 151921},
+ {10, 144300},
+ {15, 136424},
+ {20, 128565},
+ {25, 120978},
+ {30, 113875},
+ {35, 107397},
+ {40, 101629},
+ {45, 96592},
+ {50, 92253},
+ {55, 88569},
+ {60, 85461},
+ {65, 82869},
+};
+static struct v_to_cap cap_tbl_A[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+static struct v_to_cap cap_tbl_B[] = {
+ {4161, 100},
+ {4124, 98},
+ {4044, 90},
+ {4003, 85},
+ {3966, 80},
+ {3933, 75},
+ {3888, 67},
+ {3849, 60},
+ {3813, 55},
+ {3787, 47},
+ {3772, 30},
+ {3751, 25},
+ {3718, 20},
+ {3681, 16},
+ {3660, 14},
+ {3589, 10},
+ {3546, 7},
+ {3495, 4},
+ {3404, 2},
+ {3250, 0},
+};
+#endif
+static struct v_to_cap cap_tbl[] = {
+ {4186, 100},
+ {4163, 99},
+ {4114, 95},
+ {4068, 90},
+ {3990, 80},
+ {3926, 70},
+ {3898, 65},
+ {3866, 60},
+ {3833, 55},
+ {3812, 50},
+ {3787, 40},
+ {3768, 30},
+ {3747, 25},
+ {3730, 20},
+ {3705, 15},
+ {3699, 14},
+ {3684, 12},
+ {3672, 9},
+ {3657, 7},
+ {3638, 6},
+ {3556, 4},
+ {3424, 2},
+ {3317, 1},
+ {3094, 0},
+};
+
+/*
+ * Note that the res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct res_to_temp temp_tbl[] = {
+ {-5, 214834},
+ { 0, 162943},
+ { 5, 124820},
+ {10, 96520},
+ {15, 75306},
+ {20, 59254},
+ {25, 47000},
+ {30, 37566},
+ {35, 30245},
+ {40, 24520},
+ {45, 20010},
+ {50, 16432},
+ {55, 13576},
+ {60, 11280},
+ {65, 9425},
+};
+
+static const struct battery_type bat_type[] = {
+ [BATTERY_UNKNOWN] = {
+ /* First element always represent the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_vol = 3990,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 53407,
+ .resis_low = 12500,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A),
+ .r_to_t_tbl = temp_tbl_A,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A),
+ .v_to_cap_tbl = cap_tbl_A,
+
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 165418,
+ .resis_low = 82869,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B),
+ .r_to_t_tbl = temp_tbl_B,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B),
+ .v_to_cap_tbl = cap_tbl_B,
+ },
+#else
+/*
+ * These are the batteries that don't have an internal NTC resistor to measure
+ * their temperature. In this case the temperature is measured with an NTC
+ * placed near the battery, on the PCB.
+ */
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 76000,
+ .resis_low = 53000,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 30000,
+ .resis_low = 10000,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 95000,
+ .resis_low = 76001,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+#endif
+};
+
+static char *ab8500_charger_supplied_to[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+ "ab8500_btemp",
+};
+
+static char *ab8500_btemp_supplied_to[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+};
+
+static char *ab8500_fg_supplied_to[] = {
+ "ab8500_chargalg",
+ "ab8500_usb",
+};
+
+static char *ab8500_chargalg_supplied_to[] = {
+ "ab8500_fg",
+};
+
+struct ab8500_charger_platform_data ab8500_charger_plat_data = {
+ .supplied_to = ab8500_charger_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_charger_supplied_to),
+};
+
+struct ab8500_btemp_platform_data ab8500_btemp_plat_data = {
+ .supplied_to = ab8500_btemp_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_btemp_supplied_to),
+};
+
+struct ab8500_fg_platform_data ab8500_fg_plat_data = {
+ .supplied_to = ab8500_fg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_fg_supplied_to),
+};
+
+struct ab8500_chargalg_platform_data ab8500_chargalg_plat_data = {
+ .supplied_to = ab8500_chargalg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_chargalg_supplied_to),
+};
+
+static const struct ab8500_bm_capacity_levels cap_levels = {
+ .critical = 2,
+ .low = 10,
+ .normal = 70,
+ .high = 95,
+ .full = 100,
+};
+
+static const struct ab8500_fg_parameters fg = {
+ .recovery_sleep_timer = 10,
+ .recovery_total_time = 100,
+ .init_timer = 1,
+ .init_discard_time = 5,
+ .init_total_time = 40,
+ .high_curr_time = 60,
+ .accu_charging = 30,
+ .accu_high_curr = 30,
+ .high_curr_threshold = 50,
+ .lowbat_threshold = 3100,
+};
+
+static const struct ab8500_maxim_parameters maxi_params = {
+ .ena_maxi = true,
+ .chg_curr = 910,
+ .wait_cycles = 10,
+ .charger_curr_step = 100,
+};
+
+static const struct ab8500_bm_charger_parameters chg = {
+ .usb_volt_max = 5500,
+ .usb_curr_max = 1500,
+ .ac_volt_max = 7500,
+ .ac_curr_max = 1500,
+};
+
+struct ab8500_bm_data ab8500_bm_data = {
+ .temp_under = 3,
+ .temp_low = 8,
+ .temp_high = 43,
+ .temp_over = 48,
+ .main_safety_tmr_h = 4,
+ .temp_interval_chg = 20,
+ .temp_interval_nochg = 120,
+ .usb_safety_tmr_h = 4,
+ .bkup_bat_v = BUP_VCH_SEL_2P6V,
+ .bkup_bat_i = BUP_ICH_SEL_150UA,
+ .no_maintenance = false,
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+ .adc_therm = ADC_THERM_BATCTRL,
+#else
+ .adc_therm = ADC_THERM_BATTEMP,
+#endif
+ .chg_unknown_bat = false,
+ .enable_overshoot = false,
+ .fg_res = 10,
+ .cap_levels = &cap_levels,
+ .bat_type = bat_type,
+ .n_btypes = ARRAY_SIZE(bat_type),
+ .batt_id = 0,
+ .interval_charging = 5,
+ .interval_not_charging = 120,
+ .temp_hysteresis = 3,
+ .gnd_lift_resistance = 34,
+ .maxi = &maxi_params,
+ .chg_params = &chg,
+ .fg_params = &fg,
+};
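
The res_to_temp tables above are consumed by the AB8500 battery drivers, which are not part of this patch. As a hedged illustration of why the tables must be sorted by strictly falling resistance, the user-space sketch below interpolates linearly between neighbouring entries; the lookup strategy is an assumption for illustration, not the driver's verbatim implementation.

#include <stdio.h>

struct res_to_temp { int temp; int resist; };

/*
 * Interpolate a temperature from a measured resistance. This only works if
 * the table is sorted by strictly falling resistance, which is why the
 * tables above carry that requirement.
 */
static int res_to_temp_lookup(const struct res_to_temp *tbl, int n, int ohm)
{
        int i;

        if (ohm >= tbl[0].resist)
                return tbl[0].temp;
        for (i = 1; i < n; i++) {
                if (ohm >= tbl[i].resist)
                        /* linear interpolation between entries i-1 and i */
                        return tbl[i - 1].temp +
                               (tbl[i].temp - tbl[i - 1].temp) *
                               (tbl[i - 1].resist - ohm) /
                               (tbl[i - 1].resist - tbl[i].resist);
        }
        return tbl[n - 1].temp;
}

int main(void)
{
        /* first few entries of temp_tbl above */
        static const struct res_to_temp tbl[] = {
                {-5, 214834}, {0, 162943}, {5, 124820}, {10, 96520},
        };

        /* 140000 ohm falls between the 0C and 5C entries; prints 3 C */
        printf("%d C\n", res_to_temp_lookup(tbl, 4, 140000));
        return 0;
}

The descending order also lets a scan stop at the first entry whose resistance drops below the measured value; the resis_high/resis_low fields of each battery_type entry above are matched in a similar window fashion to identify the battery pack.
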
diff --git a/arch/arm/mach-ux500/board-mop500-bm.h b/arch/arm/mach-ux500/board-mop500-bm.h
new file mode 100644
index 00000000000..eb2450f1ab5
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-bm.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U8500 board specific charger and battery initialization parameters.
+ *
+ * Author: Johan Palsson <johan.palsson@stericsson.com> for ST-Ericsson.
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ *
+ */
+
+#ifndef __BOARD_MOP500_BM_H
+#define __BOARD_MOP500_BM_H
+
+#include <linux/mfd/ab8500/bm.h>
+
+extern struct ab8500_charger_platform_data ab8500_charger_plat_data;
+extern struct ab8500_btemp_platform_data ab8500_btemp_plat_data;
+extern struct ab8500_fg_platform_data ab8500_fg_plat_data;
+extern struct ab8500_chargalg_platform_data ab8500_chargalg_plat_data;
+extern struct ab8500_bm_data ab8500_bm_data;
+
+#endif
diff --git a/arch/arm/mach-ux500/board-mop500-cyttsp.c b/arch/arm/mach-ux500/board-mop500-cyttsp.c
new file mode 100755
index 00000000000..2aa27ea9b1f
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-cyttsp.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Author: Avinash A <avinash.a@stericsson.com> for ST-Ericsson
+ * License terms:GNU General Public License (GPL) version 2
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/cyttsp.h>
+#include <linux/delay.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/i2c.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/mfd/tc3589x.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/amba/pl022.h>
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/irqs-db8500.h>
+#include "pins-db8500.h"
+#include "board-mop500.h"
+#include "devices-db8500.h"
+
+#define NUM_SSP_CLIENTS 10
+
+/* cyttsp_plat_init: configures the touch panel slave-select GPIO. */
+static int cyttsp_plat_init(int on)
+{
+ int ret;
+
+ ret = gpio_direction_output(CYPRESS_SLAVE_SELECT_GPIO, 1);
+ if (ret < 0) {
+ pr_err("slave select gpio direction failed\n");
+ gpio_free(CYPRESS_SLAVE_SELECT_GPIO);
+ return ret;
+ }
+ return 0;
+}
+
+static struct pl022_ssp_controller mop500_spi2_data = {
+ .bus_id = SPI023_2_CONTROLLER,
+ .num_chipselect = NUM_SSP_CLIENTS,
+};
+
+static int cyttsp_wakeup(void)
+{
+ int ret;
+
+ ret = gpio_request(CYPRESS_TOUCH_INT_PIN, "Wakeup_pin");
+ if (ret < 0) {
+ pr_err("touch gpio failed\n");
+ return ret;
+ }
+ ret = gpio_direction_output(CYPRESS_TOUCH_INT_PIN, 1);
+ if (ret < 0) {
+ pr_err("touch gpio direction failed\n");
+ goto out;
+ }
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ /*
+ * To wake the controller from sleep
+ * state, the interrupt pin needs to
+ * be pulsed twice with a delay greater
+ * than 2 microseconds.
+ */
+ udelay(3);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ ret = gpio_direction_input(CYPRESS_TOUCH_INT_PIN);
+ if (ret < 0) {
+ pr_err("touch gpio direction IN config failed\n");
+ goto out;
+ }
+out:
+ gpio_free(CYPRESS_TOUCH_INT_PIN);
+ return ret;
+}
+struct cyttsp_platform_data cyttsp_platdata = {
+ .maxx = 480,
+ .maxy = 854,
+ .flags = 0,
+ .gen = CY_GEN3,
+ .use_st = 0,
+ .use_mt = 1,
+ .use_trk_id = 0,
+ .use_hndshk = 0,
+ .use_sleep = 1,
+ .use_gestures = 0,
+ .use_load_file = 0,
+ .use_force_fw_update = 0,
+ .use_virtual_keys = 0,
+ /* activate up to 4 groups and set active distance */
+ .gest_set = CY_GEST_GRP_NONE | CY_ACT_DIST,
+ /* change scn_type to enable finger and/or stylus detection */
+ .scn_typ = 0xA5, /* autodetect finger+stylus; balanced mutual scan */
+ .act_intrvl = CY_ACT_INTRVL_DFLT, /* Active refresh interval; ms */
+ .tch_tmout = CY_TCH_TMOUT_DFLT, /* Active touch timeout; ms */
+ .lp_intrvl = CY_LP_INTRVL_DFLT, /* Low power refresh interval; ms */
+ .init = cyttsp_plat_init,
+ .mt_sync = input_mt_sync,
+ .wakeup = cyttsp_wakeup,
+ .name = CY_SPI_NAME,
+ .irq_gpio = CYPRESS_TOUCH_INT_PIN,
+ .rst_gpio = CYPRESS_TOUCH_RST_GPIO,
+};
+
+static void cyttsp_spi_cs_control(u32 command)
+{
+ if (command == SSP_CHIP_SELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 0);
+ else if (command == SSP_CHIP_DESELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 1);
+}
+
+static struct pl022_config_chip cyttsp_ssp_config_chip = {
+ .com_mode = INTERRUPT_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ /* we can act as master only */
+ .hierarchy = SSP_MASTER,
+ .slave_tx_disable = 0,
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_16_OR_MORE_EMPTY_LOC,
+ .ctrl_len = SSP_BITS_16,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ .cs_control = cyttsp_spi_cs_control,
+};
+
+static struct spi_board_info cypress_spi_devices[] = {
+ {
+ .modalias = CY_SPI_NAME,
+ .controller_data = &cyttsp_ssp_config_chip,
+ .platform_data = &cyttsp_platdata,
+ .max_speed_hz = 1000000,
+ .bus_num = SPI023_2_CONTROLLER,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ }
+};
+
+/*
+ * TC35893
+ */
+static const unsigned int sony_keymap[] = {
+ KEY(3, 1, KEY_END),
+ KEY(4, 1, KEY_HOME),
+ KEY(6, 4, KEY_VOLUMEDOWN),
+ KEY(4, 2, KEY_EMAIL),
+ KEY(3, 3, KEY_RIGHT),
+ KEY(2, 5, KEY_BACKSPACE),
+
+ KEY(6, 7, KEY_MENU),
+ KEY(5, 0, KEY_ENTER),
+ KEY(4, 3, KEY_0),
+ KEY(3, 4, KEY_DOT),
+ KEY(5, 2, KEY_UP),
+ KEY(3, 5, KEY_DOWN),
+
+ KEY(4, 5, KEY_SEND),
+ KEY(0, 5, KEY_BACK),
+ KEY(6, 2, KEY_VOLUMEUP),
+ KEY(1, 3, KEY_SPACE),
+ KEY(7, 6, KEY_LEFT),
+ KEY(5, 5, KEY_SEARCH),
+};
+
+static struct matrix_keymap_data sony_keymap_data = {
+ .keymap = sony_keymap,
+ .keymap_size = ARRAY_SIZE(sony_keymap),
+};
+
+static struct tc3589x_keypad_platform_data tc35893_data = {
+ .krow = TC_KPD_ROWS,
+ .kcol = TC_KPD_COLUMNS,
+ .debounce_period = TC_KPD_DEBOUNCE_PERIOD,
+ .settle_time = TC_KPD_SETTLE_TIME,
+ .irqtype = IRQF_TRIGGER_FALLING,
+ .enable_wakeup = true,
+ .keymap_data = &sony_keymap_data,
+ .no_autorepeat = true,
+};
+
+static struct tc3589x_platform_data tc3589x_keypad_data = {
+ .block = TC3589x_BLOCK_KEYPAD,
+ .keypad = &tc35893_data,
+ .irq_base = MOP500_EGPIO_IRQ_BASE,
+};
+
+static struct i2c_board_info __initdata mop500_i2c0_devices_u8500[] = {
+ {
+ I2C_BOARD_INFO("tc3589x", 0x44),
+ .platform_data = &tc3589x_keypad_data,
+ .irq = NOMADIK_GPIO_TO_IRQ(64),
+ .flags = I2C_CLIENT_WAKE,
+ },
+};
+
+void mop500_cyttsp_init(void)
+{
+ int ret = 0;
+
+ /*
+ * Enable the alternative C function
+ * in the PRCMU register
+ */
+ prcmu_enable_spi2();
+ ret = gpio_request(CYPRESS_SLAVE_SELECT_GPIO, "slave_select_gpio");
+ if (ret < 0)
+ pr_err("slave select gpio failed\n");
+ spi_register_board_info(cypress_spi_devices,
+ ARRAY_SIZE(cypress_spi_devices));
+}
+
+void __init mop500_u8500uib_r3_init(void)
+{
+ mop500_cyttsp_init();
+ db8500_add_spi2(&mop500_spi2_data);
+ nmk_config_pin((GPIO64_GPIO | PIN_INPUT_PULLUP), false);
+ mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
+  ARRAY_SIZE(mop500_i2c0_devices_u8500));
+}
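
Each KEY(row, col, code) entry in the sony_keymap above packs the matrix position and the Linux key code into a single integer. The sketch below uses a simplified, hypothetical DEMO_KEY stand-in to show the idea; it is not a copy of the kernel's KEY() macro, which additionally masks the row and column fields.

#include <stdio.h>

/* Simplified stand-in for the matrix keymap KEY() packing (hypothetical). */
#define DEMO_KEY(row, col, code)  (((row) << 24) | ((col) << 16) | ((code) & 0xffff))

int main(void)
{
        /* KEY(3, 1, KEY_END) from the keymap above; KEY_END is key code 107 */
        unsigned int k = DEMO_KEY(3, 1, 107);

        printf("row=%u col=%u code=%u\n", k >> 24, (k >> 16) & 0xff, k & 0xffff);
        return 0;
}
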
diff --git a/arch/arm/mach-ux500/board-mop500-mcde.c b/arch/arm/mach-ux500/board-mop500-mcde.c
new file mode 100644
index 00000000000..645940f3654
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-mcde.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/mfd/ab8500/denc.h>
+#include <linux/workqueue.h>
+#include <linux/dispdev.h>
+#include <asm/mach-types.h>
+#include <video/av8100.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-vuib500-dpi.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+#include <video/mcde_display-av8100.h>
+#include <video/mcde_display-ab8500.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+#include <plat/pincfg.h>
+#include "pins-db8500.h"
+#include "pins.h"
+#include "board-mop500.h"
+
+#define DSI_UNIT_INTERVAL_0 0x9
+#define DSI_UNIT_INTERVAL_1 0x9
+#define DSI_UNIT_INTERVAL_2 0x5
+
+#ifdef CONFIG_FB_MCDE
+
+/*
+ * The initialization of the HDMI display driver must be delayed to
+ * ensure that inputclk is available (needed by the HDMI hardware).
+ */
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+static struct delayed_work work_dispreg_hdmi;
+#define DISPREG_HDMI_DELAY 6000
+#endif
+
+enum {
+ PRIMARY_DISPLAY_ID,
+ SECONDARY_DISPLAY_ID,
+ FICTIVE_DISPLAY_ID,
+ AV8100_DISPLAY_ID,
+ AB8500_DISPLAY_ID,
+ MCDE_NR_OF_DISPLAYS
+};
+static int display_initialized_during_boot;
+
+static int __init startup_graphics_setup(char *str)
+{
+
+ if (get_option(&str, &display_initialized_during_boot) != 1)
+ display_initialized_during_boot = 0;
+
+ switch (display_initialized_during_boot) {
+ case 1:
+ pr_info("Startup graphics support\n");
+ break;
+ case 0:
+ default:
+ pr_info("No startup graphics supported\n");
+ break;
+ }
+
+ return 1;
+}
+__setup("startup_graphics=", startup_graphics_setup);
+
+#if defined(CONFIG_DISPLAY_AB8500_TERTIARY) ||\
+ defined(CONFIG_DISPLAY_AV8100_TERTIARY)
+static struct mcde_col_transform rgb_2_yCbCr_transform = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x10, 0x80, 0x80},
+};
+#endif
+
+#ifdef CONFIG_DISPLAY_FICTIVE
+static struct mcde_display_device fictive_display = {
+ .name = "mcde_disp_fictive",
+ .id = FICTIVE_DISPLAY_ID,
+ .fictive = true,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+ .native_x_res = 1280,
+ .native_y_res = 720,
+};
+#endif /* CONFIG_DISPLAY_FICTIVE */
+
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY
+static struct mcde_display_dsi_platform_data samsung_s6d16d0_pdata0 = {
+ .link = 0,
+};
+
+static struct mcde_display_device samsung_s6d16d0_display0 = {
+ .name = "samsung_s6d16d0",
+ .id = PRIMARY_DISPLAY_ID,
+ .chnl_id = MCDE_CHNL_A,
+ .fifo = MCDE_FIFO_A,
+#ifdef CONFIG_MCDE_DISPLAY_PRIMARY_16BPP
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+#else
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#endif
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ /* TODO: Remove rotation buffers once ESRAM driver is completed */
+ .rotbuf1 = U8500_ESRAM_BASE + 0x20000 * 4,
+ .rotbuf2 = U8500_ESRAM_BASE + 0x20000 * 4 + 0x10000,
+ .dev = {
+ .platform_data = &samsung_s6d16d0_pdata0,
+ },
+};
+#endif /* CONFIG_DISPLAY_GENERIC_DSI_PRIMARY */
+
+#ifdef CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY
+static struct mcde_port sony_port0 = {
+ .link = 0,
+};
+
+struct mcde_display_sony_acx424akp_platform_data
+ sony_acx424akp_display0_pdata = {
+ .reset_gpio = HREFV60_DISP2_RST_GPIO,
+};
+
+static struct mcde_display_device sony_acx424akp_display0 = {
+ .name = "mcde_disp_sony_acx424akp",
+ .id = PRIMARY_DISPLAY_ID,
+ .port = &sony_port0,
+ .chnl_id = MCDE_CHNL_A,
+ /*
+ * A large FIFO is needed when the DDR is clocked down to 25% to avoid
+ * latency problems.
+ */
+ .fifo = MCDE_FIFO_A,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+ .synchronized_update = true,
+ .rotbuf1 = U8500_ESRAM_BASE + 0x20000 * 4,
+ .rotbuf2 = U8500_ESRAM_BASE + 0x20000 * 4 + 0x10000,
+ .dev = {
+ .platform_data = &sony_acx424akp_display0_pdata,
+ },
+};
+#endif /* CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY */
+
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_SECONDARY
+static struct mcde_display_dsi_platform_data samsung_s6d16d0_pdata1 = {
+ .link = 1,
+};
+
+static struct mcde_display_device samsung_s6d16d0_display1 = {
+ .name = "samsung_s6d16d0",
+ .id = SECONDARY_DISPLAY_ID,
+ .chnl_id = MCDE_CHNL_C1,
+ .fifo = MCDE_FIFO_C1,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_SECONDARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ .dev = {
+ .platform_data = &samsung_s6d16d0_pdata1,
+ },
+};
+#endif /* CONFIG_DISPLAY_GENERIC_DSI_SECONDARY */
+
+#ifdef CONFIG_MCDE_DISPLAY_DPI_PRIMARY
+static struct mcde_port port0 = {
+ .type = MCDE_PORTTYPE_DPI,
+ .pixel_format = MCDE_PORTPIXFMT_DPI_24BPP,
+ .ifc = 0,
+ .link = 1, /* DPI channel B can only be on link 1 */
+ .sync_src = MCDE_SYNCSRC_OFF, /* sync from output formatter */
+ .update_auto_trig = true,
+ .phy = {
+ .dpi = {
+ .tv_mode = false,
+ .clock_div = 2,
+ .polarity = DPI_ACT_LOW_VSYNC | DPI_ACT_LOW_HSYNC,
+ },
+ },
+};
+
+static struct mcde_display_dpi_platform_data dpi_display0_pdata = {0};
+static struct ux500_pins *dpi_pins;
+
+static int dpi_display_platform_enable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ if (!dpi_pins) {
+ dpi_pins = ux500_pins_get("mcde-dpi");
+ if (!dpi_pins)
+ return -EINVAL;
+ }
+
+ dev_info(&ddev->dev, "%s\n", __func__);
+ res = ux500_pins_enable(dpi_pins);
+ if (res)
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+
+ return res;
+}
+
+static int dpi_display_platform_disable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_info(&ddev->dev, "%s\n", __func__);
+
+ res = ux500_pins_disable(dpi_pins);
+ if (res)
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+
+ return res;
+
+}
+
+static struct mcde_display_device dpi_display0 = {
+ .name = "mcde_display_dpi",
+ .id = 0,
+ .port = &port0,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+#ifdef CONFIG_MCDE_DISPLAY_PRIMARY_16BPP
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+#else
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#endif
+ .native_x_res = 640,
+ .native_y_res = 480,
+ /* .synchronized_update: Don't care: port is set to update_auto_trig */
+ .dev = {
+ .platform_data = &dpi_display0_pdata,
+ },
+ .platform_enable = dpi_display_platform_enable,
+ .platform_disable = dpi_display_platform_disable,
+};
+#endif /* CONFIG_MCDE_DISPLAY_DPI_PRIMARY */
+
+#ifdef CONFIG_DISPLAY_AB8500_TERTIARY
+static struct mcde_port port_tvout1 = {
+ .type = MCDE_PORTTYPE_DPI,
+ .pixel_format = MCDE_PORTPIXFMT_DPI_24BPP,
+ .ifc = 0,
+ .link = 1, /* channel B */
+ .sync_src = MCDE_SYNCSRC_OFF,
+ .update_auto_trig = true,
+ .phy = {
+ .dpi = {
+ .bus_width = 4, /* DDR mode */
+ .tv_mode = true,
+ .clock_div = MCDE_PORT_DPI_NO_CLOCK_DIV,
+ },
+ },
+};
+
+static struct ab8500_display_platform_data ab8500_display_pdata = {
+ .nr_regulators = 2,
+ .regulator_id = {"vtvout", "vcc-N2158"},
+ .rgb_2_yCbCr_transform = &rgb_2_yCbCr_transform,
+};
+
+static struct ux500_pins *tvout_pins;
+
+static int ab8500_platform_enable(struct mcde_display_device *ddev)
+{
+ int res = 0;
+
+ if (!tvout_pins) {
+ tvout_pins = ux500_pins_get("mcde-tvout");
+ if (!tvout_pins)
+ return -EINVAL;
+ }
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ res = ux500_pins_enable(tvout_pins);
+ if (res != 0)
+ goto failed;
+
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+static int ab8500_platform_disable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ res = ux500_pins_disable(tvout_pins);
+ if (res != 0)
+ goto failed;
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+struct mcde_display_device tvout_ab8500_display = {
+ .name = "mcde_tv_ab8500",
+ .id = AB8500_DISPLAY_ID,
+ .port = &port_tvout1,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+ .native_x_res = 720,
+ .native_y_res = 576,
+ /* .synchronized_update: Don't care: port is set to update_auto_trig */
+ .dev = {
+ .platform_data = &ab8500_display_pdata,
+ },
+
+ /*
+ * We might need to describe the std here:
+ * - there are different PAL / NTSC formats (do they require MCDE
+ * settings?)
+ */
+ .platform_enable = ab8500_platform_enable,
+ .platform_disable = ab8500_platform_disable,
+};
+#endif /* CONFIG_DISPLAY_AB8500_TERTIARY */
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+static struct mcde_port port2 = {
+ .type = MCDE_PORTTYPE_DSI,
+ .mode = MCDE_PORTMODE_CMD,
+ .pixel_format = MCDE_PORTPIXFMT_DSI_24BPP,
+ .ifc = 1,
+ .link = 2,
+#ifdef CONFIG_AV8100_HWTRIG_INT
+ .sync_src = MCDE_SYNCSRC_TE0,
+#endif
+#ifdef CONFIG_AV8100_HWTRIG_I2SDAT3
+ .sync_src = MCDE_SYNCSRC_TE1,
+#endif
+#ifdef CONFIG_AV8100_HWTRIG_DSI_TE
+ .sync_src = MCDE_SYNCSRC_TE_POLLING,
+#endif
+#ifdef CONFIG_AV8100_HWTRIG_NONE
+ .sync_src = MCDE_SYNCSRC_OFF,
+#endif
+ .update_auto_trig = true,
+ .phy = {
+ .dsi = {
+ .virt_id = 0,
+ .num_data_lanes = 2,
+ .ui = DSI_UNIT_INTERVAL_2,
+ .clk_cont = false,
+ .data_lanes_swap = false,
+ },
+ },
+ .hdmi_sdtv_switch = HDMI_SWITCH,
+};
+
+static struct mcde_display_hdmi_platform_data av8100_hdmi_pdata = {
+ .reset_gpio = 0,
+ .reset_delay = 1,
+ .regulator_id = NULL, /* TODO: "display_main" */
+ .cvbs_regulator_id = "vcc-N2158",
+ .ddb_id = 1,
+ .rgb_2_yCbCr_transform = &rgb_2_yCbCr_transform,
+};
+
+static struct ux500_pins *av8100_pins;
+static int av8100_platform_enable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ if (!av8100_pins) {
+ av8100_pins = ux500_pins_get("av8100-hdmi");
+ if (!av8100_pins) {
+ res = -EINVAL;
+ goto failed;
+ }
+ }
+
+ res = ux500_pins_enable(av8100_pins);
+ if (res != 0)
+ goto failed;
+
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+static int av8100_platform_disable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ res = ux500_pins_disable(av8100_pins);
+ if (res != 0)
+ goto failed;
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+struct mcde_display_device av8100_hdmi = {
+ .name = "av8100_hdmi",
+ .id = AV8100_DISPLAY_ID,
+ .port = &port2,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+ .native_x_res = 1280,
+ .native_y_res = 720,
+ .dev = {
+ .platform_data = &av8100_hdmi_pdata,
+ },
+ .platform_enable = av8100_platform_enable,
+ .platform_disable = av8100_platform_disable,
+};
+
+static void delayed_work_dispreg_hdmi(struct work_struct *ptr)
+{
+ if (mcde_display_device_register(&av8100_hdmi))
+ pr_warning("Failed to register av8100_hdmi\n");
+}
+#endif /* CONFIG_DISPLAY_AV8100_TERTIARY */
+
+/*
+ * This function creates the framebuffer for the display that is registered.
+ */
+static int display_postregistered_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = dev;
+ u16 width, height;
+ u16 virtual_width, virtual_height;
+ u32 rotate = FB_ROTATE_UR;
+ u32 rotate_angle = 0;
+ struct fb_info *fbi;
+#ifdef CONFIG_DISPDEV
+ struct mcde_fb *mfb;
+#endif
+
+ if (event != MCDE_DSS_EVENT_DISPLAY_REGISTERED)
+ return 0;
+
+ if (ddev->id < 0 || ddev->id >= MCDE_NR_OF_DISPLAYS)
+ return 0;
+
+ mcde_dss_get_native_resolution(ddev, &width, &height);
+
+ if (uib_is_u8500uibr3())
+ rotate_angle = 0;
+ else
+ rotate_angle =
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE;
+
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY) || \
+ defined(CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY)
+ if (ddev->id == PRIMARY_DISPLAY_ID) {
+ switch (rotate_angle) {
+ case 0:
+ rotate = FB_ROTATE_UR;
+ break;
+ case 90:
+ rotate = FB_ROTATE_CW;
+ swap(width, height);
+ break;
+ case 180:
+ rotate = FB_ROTATE_UD;
+ break;
+ case 270:
+ rotate = FB_ROTATE_CCW;
+ swap(width, height);
+ break;
+ }
+ }
+#endif
+
+ virtual_width = width;
+ virtual_height = height * 2;
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+ if (ddev->id == AV8100_DISPLAY_ID) {
+#ifdef CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ hdmi_fb_onoff(ddev, 1, 0, 0);
+#endif
+ goto display_postregistered_callback_end;
+ }
+#endif /* CONFIG_DISPLAY_AV8100_TERTIARY */
+
+ /* Create frame buffer */
+ fbi = mcde_fb_create(ddev,
+ width, height,
+ virtual_width, virtual_height,
+ ddev->default_pixel_format,
+ rotate);
+
+ if (IS_ERR(fbi)) {
+ dev_warn(&ddev->dev,
+ "Failed to create fb for display %s\n",
+ ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Framebuffer created (%s)\n",
+ ddev->name);
+ }
+
+#ifdef CONFIG_DISPDEV
+ mfb = to_mcde_fb(fbi);
+
+ /* Create a dispdev overlay for this display */
+ if (dispdev_create(ddev, true, mfb->ovlys[0]) < 0) {
+ dev_warn(&ddev->dev,
+ "Failed to create disp for display %s\n",
+ ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Disp dev created for (%s)\n",
+ ddev->name);
+ }
+#endif
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+display_postregistered_callback_end:
+#endif
+ return 0;
+
+display_postregistered_callback_err:
+ return -1;
+}
+
+static struct notifier_block display_nb = {
+ .notifier_call = display_postregistered_callback,
+};
+
+/*
+ * This function refreshes the display (LCD, HDMI, TV-out) with black
+ * when the framebuffer is registered.
+ * The main display will not be updated if startup graphics is displayed
+ * from u-boot.
+ */
+static int framebuffer_postregistered_callback(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ int ret = 0;
+ struct fb_event *event_data = data;
+ struct fb_info *info;
+ struct fb_var_screeninfo var;
+ struct fb_fix_screeninfo fix;
+ struct mcde_fb *mfb;
+
+ if (event != FB_EVENT_FB_REGISTERED)
+ return 0;
+
+ if (!event_data)
+ return 0;
+
+ info = event_data->info;
+ mfb = to_mcde_fb(info);
+ if (mfb->id == 0 && display_initialized_during_boot)
+ goto out;
+
+ var = info->var;
+ fix = info->fix;
+ var.yoffset = var.yoffset ? 0 : var.yres;
+ if (info->fbops->fb_pan_display)
+ ret = info->fbops->fb_pan_display(&var, info);
+out:
+ return ret;
+}
+
+static struct notifier_block framebuffer_nb = {
+ .notifier_call = framebuffer_postregistered_callback,
+};
+
+static void setup_primary_display(void)
+{
+ /* Display reset GPIO is different depending on reference boards */
+ if (machine_is_hrefv60())
+ samsung_s6d16d0_pdata0.reset_gpio = HREFV60_DISP1_RST_GPIO;
+ else
+ samsung_s6d16d0_pdata0.reset_gpio = MOP500_DISP1_RST_GPIO;
+
+ /* Not all STUIBs support VSYNC; disable vsync for STUIB */
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY
+ if (uib_is_stuib())
+ samsung_s6d16d0_display0.synchronized_update = false;
+#endif
+}
+
+int __init init_display_devices(void)
+{
+ int ret = 0;
+
+ if (!cpu_is_u8500())
+ return ret;
+
+ ret = fb_register_client(&framebuffer_nb);
+ if (ret)
+ pr_warning("Failed to register framebuffer notifier\n");
+
+ ret = mcde_dss_register_notifier(&display_nb);
+ if (ret)
+ pr_warning("Failed to register dss notifier\n");
+#ifdef CONFIG_DISPLAY_FICTIVE
+ ret = mcde_display_device_register(&fictive_display);
+ if (ret)
+ pr_warning("Failed to register fictive display device\n");
+#endif
+
+ /* Set powermode to STANDBY if startup graphics is executed */
+#ifdef CONFIG_DISPLAY_GENERIC_PRIMARY
+ if (display_initialized_during_boot)
+ samsung_s6d16d0_display0.power_mode = MCDE_DISPLAY_PM_STANDBY;
+#endif
+#ifdef CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY
+ if (display_initialized_during_boot)
+ sony_acx424akp_display0.power_mode = MCDE_DISPLAY_PM_STANDBY;
+#endif
+
+#if defined(CONFIG_DISPLAY_GENERIC_PRIMARY) || \
+ defined(CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY)
+ /*
+ * For reference platforms different panels are used
+ * depending on UIB
+ * UIB = User Interface Board
+ */
+ setup_primary_display();
+
+#ifdef CONFIG_DISPLAY_GENERIC_PRIMARY
+ /* Samsung display for STUIB and U8500UIB */
+ if (uib_is_u8500uib() || uib_is_stuib())
+ ret = mcde_display_device_register(&samsung_s6d16d0_display0);
+#endif
+#ifdef CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY
+ /* Sony display on U8500UIBV3 */
+ if (uib_is_u8500uibr3())
+ ret = mcde_display_device_register(&sony_acx424akp_display0);
+#endif
+ if (ret)
+ pr_warning("Failed to register primary display device\n");
+#endif
+
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_SECONDARY
+ /* Display reset GPIO is different depending on reference boards */
+ if (machine_is_hrefv60())
+ samsung_s6d16d0_pdata1.reset_gpio = HREFV60_DISP2_RST_GPIO;
+ else
+ samsung_s6d16d0_pdata1.reset_gpio = MOP500_DISP2_RST_GPIO;
+ ret = mcde_display_device_register(&samsung_s6d16d0_display1);
+ if (ret)
+ pr_warning("Failed to register sub display device\n");
+#endif
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+ /* Snowball doesn't need this delay at all */
+ if (machine_is_snowball())
+ delayed_work_dispreg_hdmi(NULL);
+ else {
+ INIT_DELAYED_WORK_DEFERRABLE(&work_dispreg_hdmi,
+ delayed_work_dispreg_hdmi);
+
+ schedule_delayed_work(&work_dispreg_hdmi,
+ msecs_to_jiffies(DISPREG_HDMI_DELAY));
+ }
+#endif
+#ifdef CONFIG_DISPLAY_AB8500_TERTIARY
+ ret = mcde_display_device_register(&tvout_ab8500_display);
+ if (ret)
+ pr_warning("Failed to register ab8500 tvout device\n");
+#endif
+
+ return ret;
+}
+
+module_init(init_display_devices);
+
+#endif
diff --git a/arch/arm/mach-ux500/board-mop500-mcde.h b/arch/arm/mach-ux500/board-mop500-mcde.h
new file mode 100644
index 00000000000..38094b81b34
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-mcde.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com> for ST-Ericsson
+ *
+ * MOP500 board specific initialization for regulators
+ */
+
+#ifndef __BOARD_MOP500_MCDE_H
+#define __BOARD_MOP500_MCDE_H
+
+#include <video/mcde_display.h>
+
+#ifdef CONFIG_DISPLAY_AB8500_TERTIARY
+extern struct mcde_display_device tvout_ab8500_display;
+#endif
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+extern struct mcde_display_device av8100_hdmi;
+#endif
+
+#endif /* __BOARD_MOP500_MCDE_H */
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
index 74bfcff2bdf..f8e7cd74e3c 100644
--- a/arch/arm/mach-ux500/board-mop500-pins.c
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -6,25 +6,28 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/string.h>
#include <asm/mach-types.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
+#include <linux/gpio/nomadik.h>
+
#include <mach/hardware.h>
+#include <mach/suspend.h>
#include "pins-db8500.h"
+#include "pins.h"
-static pin_cfg_t mop500_pins_common[] = {
- /* I2C */
- GPIO147_I2C0_SCL,
- GPIO148_I2C0_SDA,
- GPIO16_I2C1_SCL,
- GPIO17_I2C1_SDA,
- GPIO10_I2C2_SDA,
- GPIO11_I2C2_SCL,
- GPIO229_I2C3_SDA,
- GPIO230_I2C3_SCL,
+#include "board-pins-sleep-force.h"
+
+enum custom_pin_cfg_t {
+ PINS_FOR_DEFAULT,
+ PINS_FOR_U9500,
+};
+
+static enum custom_pin_cfg_t pinsfor;
+static pin_cfg_t mop500_pins_common[] = {
/* MSP0 */
GPIO12_MSP0_TXD,
GPIO13_MSP0_TFS,
@@ -32,83 +35,20 @@ static pin_cfg_t mop500_pins_common[] = {
GPIO15_MSP0_RXD,
/* MSP2: HDMI */
- GPIO193_MSP2_TXD,
- GPIO194_MSP2_TCK,
- GPIO195_MSP2_TFS,
+ GPIO193_MSP2_TXD | PIN_INPUT_PULLDOWN,
+ GPIO194_MSP2_TCK | PIN_INPUT_PULLDOWN,
+ GPIO195_MSP2_TFS | PIN_INPUT_PULLDOWN,
GPIO196_MSP2_RXD | PIN_OUTPUT_LOW,
+ /* LCD TE0 */
+ GPIO68_LCD_VSI0 | PIN_INPUT_PULLUP,
+
/* Touch screen INTERFACE */
GPIO84_GPIO | PIN_INPUT_PULLUP, /* TOUCH_INT1 */
/* STMPE1601/tc35893 keypad IRQ */
GPIO218_GPIO | PIN_INPUT_PULLUP,
- /* MMC0 (MicroSD card) */
- GPIO18_MC0_CMDDIR | PIN_OUTPUT_HIGH,
- GPIO19_MC0_DAT0DIR | PIN_OUTPUT_HIGH,
- GPIO20_MC0_DAT2DIR | PIN_OUTPUT_HIGH,
-
- GPIO22_MC0_FBCLK | PIN_INPUT_NOPULL,
- GPIO23_MC0_CLK | PIN_OUTPUT_LOW,
- GPIO24_MC0_CMD | PIN_INPUT_PULLUP,
- GPIO25_MC0_DAT0 | PIN_INPUT_PULLUP,
- GPIO26_MC0_DAT1 | PIN_INPUT_PULLUP,
- GPIO27_MC0_DAT2 | PIN_INPUT_PULLUP,
- GPIO28_MC0_DAT3 | PIN_INPUT_PULLUP,
-
- /* SDI1 (SDIO) */
- GPIO208_MC1_CLK | PIN_OUTPUT_LOW,
- GPIO209_MC1_FBCLK | PIN_INPUT_NOPULL,
- GPIO210_MC1_CMD | PIN_INPUT_PULLUP,
- GPIO211_MC1_DAT0 | PIN_INPUT_PULLUP,
- GPIO212_MC1_DAT1 | PIN_INPUT_PULLUP,
- GPIO213_MC1_DAT2 | PIN_INPUT_PULLUP,
- GPIO214_MC1_DAT3 | PIN_INPUT_PULLUP,
-
- /* MMC2 (On-board DATA INTERFACE eMMC) */
- GPIO128_MC2_CLK | PIN_OUTPUT_LOW,
- GPIO129_MC2_CMD | PIN_INPUT_PULLUP,
- GPIO130_MC2_FBCLK | PIN_INPUT_NOPULL,
- GPIO131_MC2_DAT0 | PIN_INPUT_PULLUP,
- GPIO132_MC2_DAT1 | PIN_INPUT_PULLUP,
- GPIO133_MC2_DAT2 | PIN_INPUT_PULLUP,
- GPIO134_MC2_DAT3 | PIN_INPUT_PULLUP,
- GPIO135_MC2_DAT4 | PIN_INPUT_PULLUP,
- GPIO136_MC2_DAT5 | PIN_INPUT_PULLUP,
- GPIO137_MC2_DAT6 | PIN_INPUT_PULLUP,
- GPIO138_MC2_DAT7 | PIN_INPUT_PULLUP,
-
- /* MMC4 (On-board STORAGE INTERFACE eMMC) */
- GPIO197_MC4_DAT3 | PIN_INPUT_PULLUP,
- GPIO198_MC4_DAT2 | PIN_INPUT_PULLUP,
- GPIO199_MC4_DAT1 | PIN_INPUT_PULLUP,
- GPIO200_MC4_DAT0 | PIN_INPUT_PULLUP,
- GPIO201_MC4_CMD | PIN_INPUT_PULLUP,
- GPIO202_MC4_FBCLK | PIN_INPUT_NOPULL,
- GPIO203_MC4_CLK | PIN_OUTPUT_LOW,
- GPIO204_MC4_DAT7 | PIN_INPUT_PULLUP,
- GPIO205_MC4_DAT6 | PIN_INPUT_PULLUP,
- GPIO206_MC4_DAT5 | PIN_INPUT_PULLUP,
- GPIO207_MC4_DAT4 | PIN_INPUT_PULLUP,
-
- /* SKE keypad */
- GPIO153_KP_I7,
- GPIO154_KP_I6,
- GPIO155_KP_I5,
- GPIO156_KP_I4,
- GPIO157_KP_O7,
- GPIO158_KP_O6,
- GPIO159_KP_O5,
- GPIO160_KP_O4,
- GPIO161_KP_I3,
- GPIO162_KP_I2,
- GPIO163_KP_I1,
- GPIO164_KP_I0,
- GPIO165_KP_O3,
- GPIO166_KP_O2,
- GPIO167_KP_O1,
- GPIO168_KP_O0,
-
/* UART */
/* uart-0 pins gpio configuration should be
* kept intact to prevent glitch in tx line
@@ -128,9 +68,14 @@ static pin_cfg_t mop500_pins_common[] = {
GPIO31_U2_CTSn | PIN_INPUT_PULLUP,
GPIO32_U2_RTSn | PIN_OUTPUT_HIGH,
- /* Display & HDMI HW sync */
- GPIO68_LCD_VSI0 | PIN_INPUT_PULLUP,
- GPIO69_LCD_VSI1 | PIN_INPUT_PULLUP,
+ /* HSI */
+ GPIO219_HSIR_FLA0,
+ GPIO220_HSIR_DAT0,
+ GPIO221_HSIR_RDY0,
+ GPIO222_HSIT_FLA0,
+ GPIO223_HSIT_DAT0,
+ GPIO224_HSIT_RDY0,
+ GPIO225_GPIO | PIN_INPUT_PULLDOWN, /* CA_WAKE0 */
};
static pin_cfg_t mop500_pins_default[] = {
@@ -140,10 +85,13 @@ static pin_cfg_t mop500_pins_default[] = {
GPIO145_SSP0_RXD | PIN_PULL_DOWN,
GPIO146_SSP0_TXD,
+ /* XENON Flashgun INTERFACE */
+ GPIO6_IP_GPIO0 | PIN_INPUT_PULLUP,/* XENON_FLASH_ID */
+ GPIO7_IP_GPIO1 | PIN_INPUT_PULLUP,/* XENON_READY */
GPIO217_GPIO | PIN_INPUT_PULLUP, /* TC35892 IRQ */
- /* SDI0 (MicroSD card) */
+ /* sdi0 (removable MMC/SD/SDIO cards) not handled by pm_runtime */
GPIO21_MC0_DAT31DIR | PIN_OUTPUT_HIGH,
/* UART */
@@ -155,13 +103,11 @@ static pin_cfg_t mop500_pins_default[] = {
static pin_cfg_t hrefv60_pins[] = {
/* WLAN */
- GPIO4_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
GPIO85_GPIO | PIN_OUTPUT_LOW,/* WLAN_ENA */
/* XENON Flashgun INTERFACE */
GPIO6_IP_GPIO0 | PIN_INPUT_PULLUP,/* XENON_FLASH_ID */
GPIO7_IP_GPIO1 | PIN_INPUT_PULLUP,/* XENON_READY */
- GPIO170_GPIO | PIN_OUTPUT_LOW, /* XENON_CHARGE */
/* Assistant LED INTERFACE */
GPIO21_GPIO | PIN_OUTPUT_LOW, /* XENON_EN1 */
@@ -172,7 +118,7 @@ static pin_cfg_t hrefv60_pins[] = {
GPIO32_GPIO | PIN_INPUT_PULLDOWN, /* Magnetometer DRDY */
/* Display Interface */
- GPIO65_GPIO | PIN_OUTPUT_LOW, /* DISP1 RST */
+ GPIO65_GPIO | PIN_OUTPUT_HIGH, /* DISP1 NO RST */
GPIO66_GPIO | PIN_OUTPUT_LOW, /* DISP2 RST */
/* Touch screen INTERFACE */
@@ -218,7 +164,7 @@ static pin_cfg_t hrefv60_pins[] = {
GPIO145_GPIO | PIN_INPUT_PULLDOWN,/* HAL_SW */
/* Audio Amplifier Interface */
- GPIO149_GPIO | PIN_OUTPUT_LOW, /* VAUDIO_HF_EN */
+ GPIO149_GPIO | PIN_OUTPUT_HIGH, /* VAUDIO_HF_EN, enable MAX8968 */
/* GBF INTERFACE */
GPIO171_GPIO | PIN_OUTPUT_LOW, /* GBF_ENA_RESET */
@@ -233,7 +179,20 @@ static pin_cfg_t hrefv60_pins[] = {
/* Proximity Sensor */
GPIO217_GPIO | PIN_INPUT_PULLUP,
+ /* SD card detect */
+ GPIO95_GPIO | PIN_INPUT_PULLUP,
+};
+static pin_cfg_t u9500_pins[] = {
+ GPIO4_U1_RXD | PIN_INPUT_PULLUP,
+ GPIO5_U1_TXD | PIN_OUTPUT_HIGH,
+ GPIO144_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
+ GPIO226_GPIO | PIN_OUTPUT_HIGH, /* HSI AC_WAKE0 */
+};
+
+static pin_cfg_t u8500_pins[] = {
+ GPIO226_GPIO | PIN_OUTPUT_LOW, /* WLAN_PMU_EN */
+ GPIO4_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
};
static pin_cfg_t snowball_pins[] = {
@@ -274,15 +233,720 @@ static pin_cfg_t snowball_pins[] = {
/* RSTn_LAN */
GPIO141_GPIO | PIN_OUTPUT_HIGH,
+
+ /* Accelerometer/Magnetometer */
+ GPIO163_GPIO | PIN_INPUT_PULLUP, /* ACCEL_IRQ1 */
+ GPIO164_GPIO | PIN_INPUT_PULLUP, /* ACCEL_IRQ2 */
+ GPIO165_GPIO | PIN_INPUT_PULLUP, /* MAG_DRDY */
+
+ /* WLAN/GBF */
+ GPIO171_GPIO | PIN_OUTPUT_HIGH,/* GBF_ENA */
+ GPIO215_GPIO | PIN_OUTPUT_LOW,/* WLAN_ENA */
+ GPIO216_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
+};
+
+/*
+ * I2C
+ */
+
+static UX500_PINS(mop500_pins_i2c0,
+ GPIO147_I2C0_SCL,
+ GPIO148_I2C0_SDA,
+);
+
+static UX500_PINS(mop500_pins_i2c1,
+ GPIO16_I2C1_SCL,
+ GPIO17_I2C1_SDA,
+);
+
+static UX500_PINS(mop500_pins_i2c2,
+ GPIO10_I2C2_SDA,
+ GPIO11_I2C2_SCL,
+);
+
+static UX500_PINS(mop500_pins_i2c3,
+ GPIO229_I2C3_SDA,
+ GPIO230_I2C3_SCL,
+);
+
+static UX500_PINS(mop500_pins_mcde_dpi,
+ GPIO64_LCDB_DE,
+ GPIO65_LCDB_HSO,
+ GPIO66_LCDB_VSO,
+ GPIO67_LCDB_CLK,
+ GPIO70_LCD_D0,
+ GPIO71_LCD_D1,
+ GPIO72_LCD_D2,
+ GPIO73_LCD_D3,
+ GPIO74_LCD_D4,
+ GPIO75_LCD_D5,
+ GPIO76_LCD_D6,
+ GPIO77_LCD_D7,
+ GPIO153_LCD_D24,
+ GPIO154_LCD_D25,
+ GPIO155_LCD_D26,
+ GPIO156_LCD_D27,
+ GPIO157_LCD_D28,
+ GPIO158_LCD_D29,
+ GPIO159_LCD_D30,
+ GPIO160_LCD_D31,
+ GPIO161_LCD_D32,
+ GPIO162_LCD_D33,
+ GPIO163_LCD_D34,
+ GPIO164_LCD_D35,
+ GPIO165_LCD_D36,
+ GPIO166_LCD_D37,
+ GPIO167_LCD_D38,
+ GPIO168_LCD_D39,
+);
+
+static UX500_PINS(mop500_pins_mcde_tvout,
+ GPIO78_LCD_D8,
+ GPIO79_LCD_D9,
+ GPIO80_LCD_D10,
+ GPIO81_LCD_D11,
+ GPIO150_LCDA_CLK,
+);
+
+static UX500_PINS(mop500_pins_mcde_hdmi,
+ GPIO69_LCD_VSI1 | PIN_INPUT_PULLUP,
+);
+
+static UX500_PINS(mop500_pins_ske,
+ GPIO153_KP_I7 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO154_KP_I6 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO155_KP_I5 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO156_KP_I4 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO161_KP_I3 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO162_KP_I2 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO163_KP_I1 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO164_KP_I0 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO157_KP_O7 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO158_KP_O6 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO159_KP_O5 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO160_KP_O4 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO165_KP_O3 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO166_KP_O2 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO167_KP_O1 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO168_KP_O0 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+);
+
+/* sdi0 (removable MMC/SD/SDIO cards) */
+static UX500_PINS(mop500_pins_sdi0,
+ GPIO18_MC0_CMDDIR | PIN_OUTPUT_HIGH,
+ GPIO19_MC0_DAT0DIR | PIN_OUTPUT_HIGH,
+ GPIO20_MC0_DAT2DIR | PIN_OUTPUT_HIGH,
+
+ GPIO22_MC0_FBCLK | PIN_INPUT_NOPULL,
+ GPIO23_MC0_CLK | PIN_OUTPUT_LOW,
+ GPIO24_MC0_CMD | PIN_INPUT_PULLUP,
+ GPIO25_MC0_DAT0 | PIN_INPUT_PULLUP,
+ GPIO26_MC0_DAT1 | PIN_INPUT_PULLUP,
+ GPIO27_MC0_DAT2 | PIN_INPUT_PULLUP,
+ GPIO28_MC0_DAT3 | PIN_INPUT_PULLUP,
+);
+
+/* sdi1 (WLAN CW1200) */
+static UX500_PINS(mop500_pins_sdi1,
+ GPIO208_MC1_CLK | PIN_OUTPUT_LOW,
+ GPIO209_MC1_FBCLK | PIN_INPUT_NOPULL,
+ GPIO210_MC1_CMD | PIN_INPUT_PULLUP,
+ GPIO211_MC1_DAT0 | PIN_INPUT_PULLUP,
+ GPIO212_MC1_DAT1 | PIN_INPUT_PULLUP,
+ GPIO213_MC1_DAT2 | PIN_INPUT_PULLUP,
+ GPIO214_MC1_DAT3 | PIN_INPUT_PULLUP,
+);
+
+/* sdi2 (POP eMMC) */
+static UX500_PINS(mop500_pins_sdi2,
+ GPIO128_MC2_CLK | PIN_OUTPUT_LOW,
+ GPIO129_MC2_CMD | PIN_INPUT_PULLUP,
+ GPIO130_MC2_FBCLK | PIN_INPUT_NOPULL,
+ GPIO131_MC2_DAT0 | PIN_INPUT_PULLUP,
+ GPIO132_MC2_DAT1 | PIN_INPUT_PULLUP,
+ GPIO133_MC2_DAT2 | PIN_INPUT_PULLUP,
+ GPIO134_MC2_DAT3 | PIN_INPUT_PULLUP,
+ GPIO135_MC2_DAT4 | PIN_INPUT_PULLUP,
+ GPIO136_MC2_DAT5 | PIN_INPUT_PULLUP,
+ GPIO137_MC2_DAT6 | PIN_INPUT_PULLUP,
+ GPIO138_MC2_DAT7 | PIN_INPUT_PULLUP,
+);
+
+/* sdi4 (PCB eMMC) */
+static UX500_PINS(mop500_pins_sdi4,
+ GPIO197_MC4_DAT3 | PIN_INPUT_PULLUP,
+ GPIO198_MC4_DAT2 | PIN_INPUT_PULLUP,
+ GPIO199_MC4_DAT1 | PIN_INPUT_PULLUP,
+ GPIO200_MC4_DAT0 | PIN_INPUT_PULLUP,
+ GPIO201_MC4_CMD | PIN_INPUT_PULLUP,
+ GPIO202_MC4_FBCLK | PIN_INPUT_NOPULL,
+ GPIO203_MC4_CLK | PIN_OUTPUT_LOW,
+ GPIO204_MC4_DAT7 | PIN_INPUT_PULLUP,
+ GPIO205_MC4_DAT6 | PIN_INPUT_PULLUP,
+ GPIO206_MC4_DAT5 | PIN_INPUT_PULLUP,
+ GPIO207_MC4_DAT4 | PIN_INPUT_PULLUP,
+);
+
+/* USB */
+static UX500_PINS(mop500_pins_usb,
+ GPIO256_USB_NXT,
+ GPIO257_USB_STP | PIN_OUTPUT_HIGH,
+ GPIO258_USB_XCLK,
+ GPIO259_USB_DIR,
+ GPIO260_USB_DAT7,
+ GPIO261_USB_DAT6,
+ GPIO262_USB_DAT5,
+ GPIO263_USB_DAT4,
+ GPIO264_USB_DAT3,
+ GPIO265_USB_DAT2,
+ GPIO266_USB_DAT1,
+ GPIO267_USB_DAT0,
+);
+
+/* SPI2 */
+static UX500_PINS(mop500_pins_spi2,
+ GPIO216_GPIO | PIN_OUTPUT_HIGH,
+ GPIO218_SPI2_RXD | PIN_INPUT_PULLDOWN,
+ GPIO215_SPI2_TXD | PIN_OUTPUT_LOW,
+ GPIO217_SPI2_CLK | PIN_OUTPUT_LOW,
+);
+
+static struct ux500_pin_lookup mop500_pins[] = {
+ PIN_LOOKUP("mcde-dpi", &mop500_pins_mcde_dpi),
+ PIN_LOOKUP("mcde-tvout", &mop500_pins_mcde_tvout),
+ PIN_LOOKUP("av8100-hdmi", &mop500_pins_mcde_hdmi),
+ PIN_LOOKUP("nmk-i2c.0", &mop500_pins_i2c0),
+ PIN_LOOKUP("nmk-i2c.1", &mop500_pins_i2c1),
+ PIN_LOOKUP("nmk-i2c.2", &mop500_pins_i2c2),
+ PIN_LOOKUP("nmk-i2c.3", &mop500_pins_i2c3),
+ PIN_LOOKUP("ske", &mop500_pins_ske),
+ PIN_LOOKUP("sdi0", &mop500_pins_sdi0),
+ PIN_LOOKUP("sdi1", &mop500_pins_sdi1),
+ PIN_LOOKUP("sdi2", &mop500_pins_sdi2),
+ PIN_LOOKUP("sdi4", &mop500_pins_sdi4),
+ PIN_LOOKUP("ab8500-usb.0", &mop500_pins_usb),
+ PIN_LOOKUP("spi2", &mop500_pins_spi2),
+};
+
+/*
+ * Sleep pin configuration for u8500 platform.
+ * If another HW is used the GPIO's must be configured
+ * correctly when entering sleep for optimal power
+ * consumption.
+ */
+static pin_cfg_t mop500_pins_common_power_save_bank0[] = {
+ GPIO0_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO1_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO2_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO3_GPIO | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO4_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO5_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO6_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO7_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO8_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO9_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO10_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO11_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO12_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO13_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO14_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO15_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO16_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO17_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO18_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO19_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO20_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO21_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO22_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO23_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO24_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO25_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO26_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO27_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO28_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO29_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO30_U2_TXD | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO31_U2_CTSn | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+};
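
As the comment above notes, other hardware needs its own sleep tables. A minimal sketch, reusing only the pin_cfg_t flags and the sleep_pins_config_pm() helper already used in this file; the myboard_* names are hypothetical:

static pin_cfg_t myboard_power_save_bank0[] = {
	/* unused ball: input in sleep, wakeup enabled, pull disabled */
	GPIO0_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
	/* ball that must stay driven high while sleeping */
	GPIO1_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
};

static void myboard_pins_suspend_force(void)
{
	sleep_pins_config_pm(myboard_power_save_bank0,
			     ARRAY_SIZE(myboard_power_save_bank0));
}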
+
+static pin_cfg_t mop500_pins_common_power_save_bank0_href60[] = {
+ GPIO0_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO1_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO2_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO3_GPIO | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO4_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO5_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO6_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO7_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO8_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO9_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO10_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO11_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO12_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO13_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO14_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO15_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO16_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO17_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO18_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO19_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO20_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO21_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO22_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO23_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO24_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO25_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO26_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO27_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO28_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO29_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO30_U2_TXD | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO31_U2_CTSn | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank1[] = {
+ GPIO32_U2_RTSn | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO33_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO34_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO35_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO36_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank2[] = {
+ GPIO64_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO65_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO66_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO67_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO68_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO69_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO70_STMAPE_CLK | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO71_STMAPE_DAT3 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO72_STMAPE_DAT2 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO73_STMAPE_DAT1 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO74_STMAPE_DAT0 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO75_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO76_U2_TXD | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO77_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO78_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO79_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO80_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO81_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO82_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO83_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO84_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO85_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO86_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO87_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO88_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO89_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO90_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO91_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO92_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO93_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO94_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO95_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank2_href60[] = {
+ GPIO64_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO65_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO66_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO67_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO68_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO69_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO70_STMAPE_CLK | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO71_STMAPE_DAT3 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO72_STMAPE_DAT2 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO73_STMAPE_DAT1 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO74_STMAPE_DAT0 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO75_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO76_U2_TXD | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO77_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO78_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO79_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO80_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO81_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO82_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO83_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO84_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO85_GPIO,
+ GPIO86_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO87_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO88_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO89_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO90_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO91_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO92_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO93_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO94_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO95_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank3[] = {
+ GPIO96_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO97_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank4[] = {
+ GPIO128_MC2_CLK | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO129_MC2_CMD | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO130_MC2_FBCLK | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO131_MC2_DAT0 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO132_MC2_DAT1 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO133_MC2_DAT2 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO134_MC2_DAT3 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO135_MC2_DAT4 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO136_MC2_DAT5 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO137_MC2_DAT6 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO138_MC2_DAT7 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO139_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO140_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO141_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO142_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO143_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO144_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO145_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO146_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO147_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO148_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO149_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO150_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO151_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO152_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO153_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO154_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO155_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO156_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO157_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO158_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO159_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank5[] = {
+ GPIO160_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO161_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO162_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO163_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO164_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO165_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO166_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO167_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO168_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO169_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO170_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO171_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank5_href60[] = {
+ GPIO160_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO161_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO162_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO163_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO164_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO165_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO166_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO167_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO168_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO169_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO170_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO171_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank6[] = {
+ GPIO192_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO193_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO194_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO195_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO196_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO197_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO198_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO199_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO200_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO201_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO202_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO203_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO204_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO205_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO206_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO207_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO208_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO209_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO210_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO211_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO212_GPIO,
+ GPIO213_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO214_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO215_GPIO,
+
+ GPIO216_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO217_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO218_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO219_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO220_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO221_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO222_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO223_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank6_href60[] = {
+ GPIO192_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO193_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO194_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO195_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO196_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO197_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO198_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO199_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO200_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO201_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO202_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO203_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO204_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO205_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO206_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO207_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO208_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO209_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO210_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO211_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO212_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO213_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO214_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO215_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO216_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO217_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO218_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO219_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO220_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO221_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO222_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO223_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank7[] = {
+ GPIO224_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO225_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO226_GPIO | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO227_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO228_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO229_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO230_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank7_href60[] = {
+ GPIO224_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO225_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO226_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO227_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO228_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO229_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO230_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank8[] = {
+ GPIO256_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO257_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO258_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO259_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+
+ GPIO260_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO261_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO262_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO263_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+
+ GPIO264_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO265_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO266_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO267_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
};
+
+static void mop500_pins_suspend_force(void)
+{
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank0_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank0_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank0,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank0));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank1,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank1));
+
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank2_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank2_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank2,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank2));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank3,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank3));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank4,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank4));
+
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank5_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank5_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank5,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank5));
+
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank6_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank6,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6));
+
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank7_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank7,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank8,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank8));
+}
+
+/*
+ * This function is called to force GPIO power-save
+ * mux settings during suspend.
+ * This is a temporary solution until all drivers
+ * control their own pin settings when inactive.
+ */
+static void mop500_pins_suspend_force_mux(void)
+{
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank0,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank0));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank1,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank1));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank2,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank2));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank3,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank3));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank4,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank4));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank5,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank5));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank6,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank7,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank8,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank8));
+}
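
As noted above, forcing the mux from board code is a stop-gap; the intended end state is that each driver looks up its pin set from the mop500_pins table and toggles it around suspend itself. A rough sketch, assuming the ux500_pins_get()/ux500_pins_enable()/ux500_pins_disable() helpers that this tree presumably provides alongside ux500_pins_add(); the mydrv_* names are hypothetical:

static struct ux500_pins *mydrv_pins;

static int mydrv_pins_claim(struct device *dev)
{
	/* "nmk-i2c.0" etc. match the PIN_LOOKUP() names registered above */
	mydrv_pins = ux500_pins_get(dev_name(dev));
	if (!mydrv_pins)
		return -ENODEV;
	return ux500_pins_enable(mydrv_pins);
}

static int mydrv_suspend(struct device *dev)
{
	/* let the pin framework apply this device's sleep settings */
	return ux500_pins_disable(mydrv_pins);
}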
+
+/*
+ * Passing "pinsfor=" on the kernel command line allows custom
+ * configuration of GPIOs on u8500-derived boards.
+ */
+static int __init early_pinsfor(char *p)
+{
+ pinsfor = PINS_FOR_DEFAULT;
+
+ if (strcmp(p, "u9500-21") == 0)
+ pinsfor = PINS_FOR_U9500;
+
+ return 0;
+}
+early_param("pinsfor", early_pinsfor);
+
+int pins_for_u9500(void)
+{
+ if (pinsfor == PINS_FOR_U9500)
+ return 1;
+
+ return 0;
+}
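
Putting the two pieces above together: the boot loader appends the option and board code branches on the accessor. A short sketch; the command line and the myboard_* function are only illustrative:

/* Boot loader command line: "... pinsfor=u9500-21" */

static void __init myboard_variant_pins_init(void)
{
	if (pins_for_u9500())
		nmk_config_pins(u9500_pins, ARRAY_SIZE(u9500_pins));
	else
		nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
}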
+
void __init mop500_pins_init(void)
{
nmk_config_pins(mop500_pins_common,
ARRAY_SIZE(mop500_pins_common));
+ ux500_pins_add(mop500_pins, ARRAY_SIZE(mop500_pins));
+
+ switch (pinsfor) {
+ case PINS_FOR_U9500:
+ nmk_config_pins(u9500_pins, ARRAY_SIZE(u9500_pins));
+ break;
+
+ case PINS_FOR_DEFAULT:
+ nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
+ default:
+ break;
+ }
+
nmk_config_pins(mop500_pins_default,
ARRAY_SIZE(mop500_pins_default));
+
+ suspend_set_pins_force_fn(mop500_pins_suspend_force,
+ mop500_pins_suspend_force_mux);
}
void __init snowball_pins_init(void)
@@ -290,8 +954,15 @@ void __init snowball_pins_init(void)
nmk_config_pins(mop500_pins_common,
ARRAY_SIZE(mop500_pins_common));
+ ux500_pins_add(mop500_pins, ARRAY_SIZE(mop500_pins));
+
+ nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
+
nmk_config_pins(snowball_pins,
ARRAY_SIZE(snowball_pins));
+
+ suspend_set_pins_force_fn(mop500_pins_suspend_force,
+ mop500_pins_suspend_force_mux);
}
void __init hrefv60_pins_init(void)
@@ -299,6 +970,22 @@ void __init hrefv60_pins_init(void)
nmk_config_pins(mop500_pins_common,
ARRAY_SIZE(mop500_pins_common));
+ ux500_pins_add(mop500_pins, ARRAY_SIZE(mop500_pins));
+
nmk_config_pins(hrefv60_pins,
ARRAY_SIZE(hrefv60_pins));
+
+ switch (pinsfor) {
+ case PINS_FOR_U9500:
+ nmk_config_pins(u9500_pins, ARRAY_SIZE(u9500_pins));
+ break;
+
+ case PINS_FOR_DEFAULT:
+ nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
+ default:
+ break;
+ }
+
+ suspend_set_pins_force_fn(mop500_pins_suspend_force,
+ mop500_pins_suspend_force_mux);
}
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 2735d03996c..10d77fb5cf5 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -13,6 +13,48 @@
#include <linux/regulator/ab8500.h>
#include "board-mop500-regulators.h"
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+/*
+ * GPIO regulator controlled by the ab8500 GPIO16
+ */
+static struct regulator_consumer_supply gpio_wlan_vbat_consumers[] = {
+ /* for cg2900 chip */
+ REGULATOR_SUPPLY("vdd", "cg2900-uart.0"),
+ /* for cw1200 chip */
+ REGULATOR_SUPPLY("vdd", "cw1200_wlan"),
+};
+
+struct regulator_init_data gpio_wlan_vbat_regulator = {
+ .constraints = {
+ .name = "WLAN-VBAT",
+ .min_uV = 3600000,
+ .max_uV = 3600000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(gpio_wlan_vbat_consumers),
+ .consumer_supplies = gpio_wlan_vbat_consumers,
+};
+
+/*
+ * GPIO regulator controlled by the ab8500 GPIO26
+ */
+static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
+ /* for LAN chip */
+ REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
+struct regulator_init_data gpio_en_3v3_regulator = {
+ .constraints = {
+ .name = "EN-3V3",
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(gpio_en_3v3_consumers),
+ .consumer_supplies = gpio_en_3v3_consumers,
+};
+#endif
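
These two init_data blocks only take effect once matching fixed-voltage regulator devices are registered. A sketch for the WLAN-VBAT supply, assuming the standard "reg-fixed-voltage" platform driver; the GPIO number is a placeholder for whatever Linux GPIO number AB8500 GPIO16 resolves to on this board, and EN-3V3 would be wired the same way with AB8500 GPIO26:

#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>

static struct fixed_voltage_config gpio_wlan_vbat_pdata = {
	.supply_name	= "WLAN-VBAT",
	.microvolts	= 3600000,
	.gpio		= -1,		/* placeholder: AB8500 GPIO16 */
	.enable_high	= 1,
	.init_data	= &gpio_wlan_vbat_regulator,
};

static struct platform_device gpio_wlan_vbat_device = {
	.name	= "reg-fixed-voltage",
	.id	= 0,
	.dev	= {
		.platform_data = &gpio_wlan_vbat_pdata,
	},
};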
+
/*
* TPS61052 regulator
*/
@@ -38,21 +80,39 @@ struct regulator_init_data tps61052_regulator = {
};
static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
- /* External displays, connector on board 2v5 power supply */
- REGULATOR_SUPPLY("vaux12v5", "mcde.0"),
+ /*
+ * lps001wp barometer: its I2C dev name is 2-005c;
+ * maybe change that in the driver, as for the lsm303dlh drivers
+ */
+ REGULATOR_SUPPLY("vdd", "2-005c"),
+ /* Main display, u8500 R3 uib */
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ /* Main display, u8500 uib and ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
+ /* Secondary display, ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
/* SFH7741 proximity sensor */
REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
/* BH1780GLS ambient light sensor */
REGULATOR_SUPPLY("vcc", "2-0029"),
/* lsm303dlh accelerometer */
- REGULATOR_SUPPLY("vdd", "3-0018"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.0"),
/* lsm303dlh magnetometer */
- REGULATOR_SUPPLY("vdd", "3-001e"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.1"),
/* Rohm BU21013 Touchscreen devices */
REGULATOR_SUPPLY("avdd", "3-005c"),
REGULATOR_SUPPLY("avdd", "3-005d"),
/* Synaptics RMI4 Touchscreen device */
REGULATOR_SUPPLY("vdd", "3-004b"),
+ /* L3G4200D Gyroscope device */
+ REGULATOR_SUPPLY("vdd", "l3g4200d"),
+ /* Proximity and Hall sensor device */
+ REGULATOR_SUPPLY("vdd", "sensor1p.0"),
+ /* Ambient light sensor device */
+ REGULATOR_SUPPLY("vdd", "3-0029"),
+ /* Cypress TrueTouch Touchscreen device */
+ REGULATOR_SUPPLY("vcpin", "spi8.0"),
+ /* Camera device */
+ REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
};
static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
@@ -60,6 +120,12 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sdi4"),
/* AB8500 audio codec */
REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+ /* AB8500 accessory detect 1 */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
+ /* AV8100 HDMI device */
+ REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
};
static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
@@ -72,6 +138,30 @@ static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
/* Internal general-purpose ADC */
REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
+};
+
+static struct regulator_consumer_supply ab8500_vaudio_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-audio", NULL),
+};
+
+static struct regulator_consumer_supply ab8500_vamic1_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-amic1", NULL),
+};
+
+static struct regulator_consumer_supply ab8500_vamic2_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-amic2", NULL),
+};
+
+static struct regulator_consumer_supply ab8500_vdmic_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-dmic", NULL),
};
static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
@@ -82,74 +172,90 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
};
static struct regulator_consumer_supply ab8500_vana_consumers[] = {
- /* External displays, connector on board, 1v8 power supply */
- REGULATOR_SUPPLY("vsmps2", "mcde.0"),
+ /* DB8500 DSI */
+ REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
+ /* DB8500 CSI */
+ REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab8500_sysclkreq_2_consumers[] = {
+ /* CG2900 device */
+ REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
+};
+
+static struct regulator_consumer_supply ab8500_sysclkreq_4_consumers[] = {
+ /* CW1200 device */
+ REGULATOR_SUPPLY("wlan_1v8", "cw1200_wlan.0"),
};
/* ab8500 regulator register initialization */
-struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
+static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
/*
* VanaRequestCtrl = HP/LP depending on VxRequest
+ * VpllRequestCtrl = HP/LP depending on VxRequest
* VextSupply1RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xfc, 0x00),
/*
* VextSupply2RequestCtrl = HP/LP depending on VxRequest
* VextSupply3RequestCtrl = HP/LP depending on VxRequest
* Vaux1RequestCtrl = HP/LP depending on VxRequest
* Vaux2RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
/*
* Vaux3RequestCtrl = HP/LP depending on VxRequest
* SwHPReq = Control through SWValid disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
/*
+ * Vsmps1SysClkReq1HPValid = enabled
+ * Vsmps2SysClkReq1HPValid = enabled
+ * Vsmps3SysClkReq1HPValid = enabled
* VanaSysClkReq1HPValid = disabled
+ * VpllSysClkReq1HPValid = enabled
* Vaux1SysClkReq1HPValid = disabled
* Vaux2SysClkReq1HPValid = disabled
* Vaux3SysClkReq1HPValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xff, 0x17),
/*
* VextSupply1SysClkReq1HPValid = disabled
* VextSupply2SysClkReq1HPValid = disabled
* VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
/*
* VanaHwHPReq1Valid = disabled
* Vaux1HwHPreq1Valid = disabled
* Vaux2HwHPReq1Valid = disabled
* Vaux3HwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq1Valid = disabled
* VextSupply2HwHPReq1Valid = disabled
* VextSupply3HwHPReq1Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
/*
* VanaHwHPReq2Valid = disabled
* Vaux1HwHPReq2Valid = disabled
* Vaux2HwHPReq2Valid = disabled
* Vaux3HwHPReq2Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq2Valid = disabled
* VextSupply2HwHPReq2Valid = disabled
* VextSupply3HwHPReq2Valid = HWReq2 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
/*
* VanaSwHPReqValid = disabled
* Vaux1SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
/*
* Vaux2SwHPReqValid = disabled
* Vaux3SwHPReqValid = disabled
@@ -157,7 +263,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VextSupply2SwHPReqValid = disabled
* VextSupply3SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
/*
* SysClkReq2Valid1 = SysClkReq2 controlled
* SysClkReq3Valid1 = disabled
@@ -167,7 +273,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid1 = disabled
* SysClkReq8Valid1 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
/*
* SysClkReq2Valid2 = disabled
* SysClkReq3Valid2 = disabled
@@ -177,7 +283,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid2 = disabled
* SysClkReq8Valid2 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
/*
* VTVoutEna = disabled
* Vintcore12Ena = disabled
@@ -185,66 +291,93 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* Vintcore12LP = inactive (HP)
* VTVoutLP = inactive (HP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10),
+ INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
/*
* VaudioEna = disabled
* VdmicEna = disabled
* Vamic1Ena = disabled
* Vamic2Ena = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
/*
* Vamic1_dzout = high-Z when Vamic1 is disabled
* Vamic2_dzout = high-Z when Vamic2 is disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
+ /*
+ * Vsmps1Regu = HW control
+ * Vsmps1SelCtrl = Vsmps1 voltage defined by Vsmps1Sel2
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VSMPS1REGU, 0x0f, 0x06),
+ /*
+ * Vsmps2Regu = HW control
+ * Vsmps2SelCtrl = Vsmps2 voltage defined by Vsmps2Sel2
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VSMPS2REGU, 0x0f, 0x06),
+ /*
+ * Vsmps3Sel2 = 1.2125 V
+ * NOTE! PRCMU register
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VSMPS3SEL2, 0x7f, 0x29),
+ /*
+ * Vsmps3Regu = HW control
+ * Vsmps3SelCtrl = Vsmps3 voltage defined by Vsmps3Sel2
+ * NOTE! PRCMU register
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VSMPS3REGU, 0x0f, 0x06),
+ /*
+ * Vsmps3Sel1 = 0.925V
+ * NOTE! PRCMU register
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VSMPS3SEL1, 0x7f, 0x12),
/*
* VPll = Hw controlled
* VanaRegu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02),
+ INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
/*
* VrefDDREna = disabled
* VrefDDRSleepMode = inactive (no pulldown)
*/
- INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
/*
* VextSupply1Regu = HW control
* VextSupply2Regu = HW control
- * VextSupply3Regu = HW control
+ * VextSupply3Regu = Low Power mode
* ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
* ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
*/
- INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x1a),
/*
* Vaux1Regu = force HP
* Vaux2Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
/*
- * Vaux3regu = force off
+ * Vrf1Regu = HW control
+ * Vaux3Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x0f, 0x08),
/*
- * Vsmps1 = 1.15V
+ * Vsmps1Sel1 = 1.2 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24),
+ INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x3f, 0x28),
/*
- * Vaux1Sel = 2.5 V
+ * Vaux1Sel = 2.8 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
/*
* Vaux2Sel = 2.9 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
/*
* Vaux3Sel = 2.91 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
/*
* VextSupply12LP = disabled (no LP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
/*
* Vaux1Disch = short discharge time
* Vaux2Disch = short discharge time
@@ -253,23 +386,24 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VTVoutDisch = short discharge time
* VaudioDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
/*
* VanaDisch = short discharge time
* VdmicPullDownEna = pulldown disabled when Vdmic is disabled
* VdmicDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
};
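
Each entry above now carries a mask as well as a value: only the bits covered by the mask are written, so neighbouring fields in the same register keep their current contents. In other words, the driver is expected to merge the value roughly like the hypothetical helper below:

static u8 ab8500_merge_reg_init(u8 old, u8 mask, u8 value)
{
	/* touch only the bits selected by the mask */
	return (old & ~mask) | (value & mask);
}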
/* AB8500 regulators */
-struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
/* supplies to the display/camera */
[AB8500_LDO_AUX1] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-DISPLAY",
- .min_uV = 2500000,
- .max_uV = 2900000,
+ .min_uV = 2800000,
+ .max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.boot_on = 1, /* display is on at boot */
@@ -286,24 +420,32 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
},
/* supplies to the on-board eMMC */
[AB8500_LDO_AUX2] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-eMMC1",
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
.consumer_supplies = ab8500_vaux2_consumers,
},
/* supply for VAUX3, supplies to SDcard slots */
[AB8500_LDO_AUX3] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-MMC-SD",
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
.consumer_supplies = ab8500_vaux3_consumers,
@@ -323,6 +465,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-AUD",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaudio_consumers),
+ .consumer_supplies = ab8500_vaudio_consumers,
},
/* supply for v-anamic1 VAMic1-LDO */
[AB8500_LDO_ANAMIC1] = {
@@ -330,6 +474,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-AMIC1",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
+ .consumer_supplies = ab8500_vamic1_consumers,
},
/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
[AB8500_LDO_ANAMIC2] = {
@@ -337,6 +483,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-AMIC2",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
+ .consumer_supplies = ab8500_vamic2_consumers,
},
/* supply for v-dmic, VDMIC LDO */
[AB8500_LDO_DMIC] = {
@@ -344,23 +492,87 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-DMIC",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers),
+ .consumer_supplies = ab8500_vdmic_consumers,
},
/* supply for v-intcore12, VINTCORE12 LDO */
[AB8500_LDO_INTCORE] = {
.constraints = {
.name = "V-INTCORE",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
.consumer_supplies = ab8500_vintcore_consumers,
},
- /* supply for U8500 CSI/DSI, VANA LDO */
+ /* supply for U8500 CSI-DSI, VANA LDO */
[AB8500_LDO_ANA] = {
.constraints = {
- .name = "V-CSI/DSI",
+ .name = "V-CSI-DSI",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
.consumer_supplies = ab8500_vana_consumers,
},
+ /* sysclkreq 2 pin */
+ [AB8500_SYSCLKREQ_2] = {
+ .constraints = {
+ .name = "V-SYSCLKREQ-2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_sysclkreq_2_consumers),
+ .consumer_supplies = ab8500_sysclkreq_2_consumers,
+ },
+ /* sysclkreq 4 pin */
+ [AB8500_SYSCLKREQ_4] = {
+ .constraints = {
+ .name = "V-SYSCLKREQ-4",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_sysclkreq_4_consumers),
+ .consumer_supplies = ab8500_sysclkreq_4_consumers,
+ },
+};
+
+/* supply for VextSupply3 */
+static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
+ /* SIM supply for 3 V SIM cards */
+ REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
+};
+
+/*
+ * AB8500 external regulators
+ */
+static struct regulator_init_data ab8500_ext_regulators[] = {
+ /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
+ [AB8500_EXT_SUPPLY3] = {
+ .constraints = {
+ .name = "ab8500-ext-supply3",
+ .min_uV = 3400000,
+ .max_uV = 3400000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_ext_supply3_consumers),
+ .consumer_supplies = ab8500_ext_supply3_consumers,
+ },
+};
+
+struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
+ .reg_init = ab8500_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
+ .regulator = ab8500_regulators,
+ .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .ext_regulator = ab8500_ext_regulators,
+ .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
};
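
A sketch of how this aggregate is expected to be handed to the AB8500 MFD cell from the board file, assuming ab8500_platform_data in this tree carries a .regulator pointer for it (the field name is an assumption):

static struct ab8500_platform_data myboard_ab8500_platdata = {
	.regulator	= &ab8500_regulator_plat_data,
	/* irq base, GPIO, codec, etc. omitted */
};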
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index 94992158d96..ed309081e14 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -14,9 +14,9 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
-extern struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS];
-extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
extern struct regulator_init_data tps61052_regulator;
+extern struct regulator_init_data gpio_wlan_vbat_regulator;
+extern struct regulator_init_data gpio_en_3v3_regulator;
#endif
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index a0b53f6baca..1c47f60c588 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -11,35 +11,30 @@
#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/mach-types.h>
#include <plat/ste_dma40.h>
#include <mach/devices.h>
#include <mach/hardware.h>
+#include <mach/ste-dma40-db8500.h>
#include "devices-db8500.h"
#include "board-mop500.h"
-#include "ste-dma40-db8500.h"
+
+#undef CONFIG_STE_DMA40 /* Temporarily disable DMA */
/*
* SDI 0 (MicroSD slot)
*/
-/* MMCIPOWER bits */
-#define MCI_DATA2DIREN (1 << 2)
-#define MCI_CMDDIREN (1 << 3)
-#define MCI_DATA0DIREN (1 << 4)
-#define MCI_DATA31DIREN (1 << 5)
-#define MCI_FBCLKEN (1 << 7)
-
/* GPIO pins used by the sdi0 level shifter */
static int sdi0_en = -1;
static int sdi0_vsel = -1;
-static u32 mop500_sdi0_vdd_handler(struct device *dev, unsigned int vdd,
- unsigned char power_mode)
+static int mop500_sdi0_ios_handler(struct device *dev, struct mmc_ios *ios)
{
- switch (power_mode) {
+ switch (ios->power_mode) {
case MMC_POWER_UP:
case MMC_POWER_ON:
/*
@@ -52,6 +47,7 @@ static u32 mop500_sdi0_vdd_handler(struct device *dev, unsigned int vdd,
*/
gpio_direction_output(sdi0_vsel, 0);
gpio_direction_output(sdi0_en, 1);
+ udelay(100);
break;
case MMC_POWER_OFF:
gpio_direction_output(sdi0_vsel, 0);
@@ -59,15 +55,14 @@ static u32 mop500_sdi0_vdd_handler(struct device *dev, unsigned int vdd,
break;
}
- return MCI_FBCLKEN | MCI_CMDDIREN | MCI_DATA0DIREN |
- MCI_DATA2DIREN | MCI_DATA31DIREN;
+ return 0;
}
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
- .src_dev_type = DB8500_DMA_DEV29_SD_MM0_RX,
+ .src_dev_type = DB8500_DMA_DEV1_SD_MMC0_RX,
.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
.src_info.data_width = STEDMA40_WORD_WIDTH,
.dst_info.data_width = STEDMA40_WORD_WIDTH,
@@ -79,7 +74,7 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.src_dev_type = STEDMA40_DEV_SRC_MEMORY,
- .dst_dev_type = DB8500_DMA_DEV29_SD_MM0_TX,
+ .dst_dev_type = DB8500_DMA_DEV1_SD_MMC0_TX,
.src_info.data_width = STEDMA40_WORD_WIDTH,
.dst_info.data_width = STEDMA40_WORD_WIDTH,
.use_fixed_channel = true,
@@ -88,13 +83,17 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
#endif
static struct mmci_platform_data mop500_sdi0_data = {
- .vdd_handler = mop500_sdi0_vdd_handler,
+ .ios_handler = mop500_sdi0_ios_handler,
.ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA |
MMC_CAP_SD_HIGHSPEED |
MMC_CAP_MMC_HIGHSPEED,
.gpio_wp = -1,
+ .sigdir = MCI_ST_FBCLKEN |
+ MCI_ST_CMDDIREN |
+ MCI_ST_DATA0DIREN |
+ MCI_ST_DATA2DIREN,
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &mop500_sdi0_dma_cfg_rx,
@@ -102,29 +101,72 @@ static struct mmci_platform_data mop500_sdi0_data = {
#endif
};
-static void sdi0_configure(void)
+/*
+ * SDI1 (SDIO WLAN)
+ */
+#ifdef SDIO_DMA_ON
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg sdi1_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV32_SD_MM1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi1_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV32_SD_MM1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+#endif
+
+static struct mmci_platform_data mop500_sdi1_data = {
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+#ifdef SDIO_DMA_ON
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi1_dma_cfg_rx,
+ .dma_tx_param = &sdi1_dma_cfg_tx,
+#endif
+#endif
+};
+
+static void sdi0_sdi1_configure(void)
{
- int ret;
+ int ret;
+ u32 periphid = 0;
- ret = gpio_request(sdi0_en, "level shifter enable");
- if (!ret)
- ret = gpio_request(sdi0_vsel,
- "level shifter 1v8-3v select");
- if (ret) {
- pr_warning("unable to config sdi0 gpios for level shifter.\n");
- return;
- }
+ ret = gpio_request(sdi0_en, "level shifter enable");
+ if (!ret)
+ ret = gpio_request(sdi0_vsel,
+ "level shifter 1v8-3v select");
+
+ if (ret) {
+ pr_warning("unable to config sdi0 gpios for level shifter.\n");
+ return;
+ }
+
+ /* Select the default 2.9V and enable level shifter */
+ gpio_direction_output(sdi0_vsel, 0);
+ gpio_direction_output(sdi0_en, 1);
- /* Select the default 2.9V and enable level shifter */
- gpio_direction_output(sdi0_vsel, 0);
- gpio_direction_output(sdi0_en, 1);
+ /* v2 has a new version of this block that needs to be forced */
+ if (cpu_is_u8500v20_or_later())
+ periphid = 0x10480180;
- /* Add the device, force v2 to subrevision 1 */
- if (cpu_is_u8500v2())
- db8500_add_sdi0(&mop500_sdi0_data, 0x10480180);
- else
- db8500_add_sdi0(&mop500_sdi0_data, 0);
+ db8500_add_sdi0(&mop500_sdi0_data, periphid);
+ db8500_add_sdi1(&mop500_sdi1_data, periphid);
}
void mop500_sdi_tc35892_init(void)
@@ -132,13 +174,12 @@ void mop500_sdi_tc35892_init(void)
mop500_sdi0_data.gpio_cd = GPIO_SDMMC_CD;
sdi0_en = GPIO_SDMMC_EN;
sdi0_vsel = GPIO_SDMMC_1V8_3V_SEL;
- sdi0_configure();
+ sdi0_sdi1_configure();
}
/*
* SDI 2 (POP eMMC, not on DB8500ed)
*/
-
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi2_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
@@ -215,11 +256,12 @@ void __init mop500_sdi_init(void)
u32 periphid = 0;
/* v2 has a new version of this block that need to be forced */
- if (cpu_is_u8500v2())
+ if (cpu_is_u8500v20_or_later()) {
periphid = 0x10480180;
- /* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */
- if (!cpu_is_u8500v10())
+
+ /* PoP:ed eMMC on DB8500 v1.0 has problems with high speed */
mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
+ }
db8500_add_sdi2(&mop500_sdi2_data, periphid);
@@ -227,7 +269,7 @@ void __init mop500_sdi_init(void)
db8500_add_sdi4(&mop500_sdi4_data, periphid);
/*
- * On boards with the TC35892 GPIO expander, sdi0 will finally
+ * On boards with the TC35892 GPIO expander, sdi0 and sdi1 will finally
* be added when the TC35892 initializes and calls
* mop500_sdi_tc35892_init() above.
*/
@@ -246,7 +288,7 @@ void __init snowball_sdi_init(void)
mop500_sdi0_data.cd_invert = true;
sdi0_en = SNOWBALL_SDMMC_EN_GPIO;
sdi0_vsel = SNOWBALL_SDMMC_1V8_3V_GPIO;
- sdi0_configure();
+ sdi0_sdi1_configure();
}
void __init hrefv60_sdi_init(void)
@@ -263,5 +305,5 @@ void __init hrefv60_sdi_init(void)
mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
sdi0_en = HREFV60_SDMMC_EN_GPIO;
sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO;
- sdi0_configure();
+ sdi0_sdi1_configure();
}
diff --git a/arch/arm/mach-ux500/board-mop500-stm.c b/arch/arm/mach-ux500/board-mop500-stm.c
new file mode 100644
index 00000000000..7d8148c5878
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-stm.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ *
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Author: Olivier Germain <olivier.germain@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <plat/pincfg.h>
+#include <mach/devices.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <asm/io.h>
+#include <trace/stm.h>
+#include "pins-db8500.h"
+
+static pin_cfg_t mop500_stm_mipi34_pins[] = {
+ GPIO70_STMAPE_CLK | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO71_STMAPE_DAT3 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO72_STMAPE_DAT2 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO73_STMAPE_DAT1 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO74_STMAPE_DAT0 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO75_U2_RXD | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO76_U2_TXD | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+};
+
+static pin_cfg_t mop500_stm_mipi60_pins[] = {
+ GPIO153_U2_RXD,
+ GPIO154_U2_TXD,
+ GPIO155_STMAPE_CLK,
+ GPIO156_STMAPE_DAT3,
+ GPIO157_STMAPE_DAT2,
+ GPIO158_STMAPE_DAT1,
+ GPIO159_STMAPE_DAT0,
+};
+
+static pin_cfg_t mop500_ske_pins[] = {
+ GPIO153_KP_I7 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO154_KP_I6 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO155_KP_I5 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO156_KP_I4 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO161_KP_I3 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO162_KP_I2 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO163_KP_I1 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO164_KP_I0 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO157_KP_O7 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO158_KP_O6 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO159_KP_O5 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO160_KP_O4 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO165_KP_O3 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO166_KP_O2 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO167_KP_O1 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO168_KP_O0 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+};
+
+static int stm_ste_disable_ape_on_mipi60(void)
+{
+ int retval;
+
+ retval = nmk_config_pins_sleep(ARRAY_AND_SIZE(mop500_stm_mipi60_pins));
+ if (retval)
+ pr_err("STM: Failed to disable MIPI60\n");
+ else {
+ retval = nmk_config_pins(ARRAY_AND_SIZE(mop500_ske_pins));
+ if (retval)
+ pr_err("STM: Failed to enable SKE gpio\n");
+ }
+ return retval;
+}
+
+/*
+ * Manage STM output pins connection (MIPI34/MIPI60 connectors)
+ */
+#define PRCM_GPIOCR (_PRCMU_BASE + 0x138)
+#define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800
+#define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1
+
+
+static int stm_ste_connection(enum stm_connection_type con_type)
+{
+ int retval = -EINVAL;
+ u32 gpiocr = readl(PRCM_GPIOCR);
+
+ if (con_type != STM_DISCONNECT) {
+ /* Always enable MIPI34 GPIO pins */
+ retval = nmk_config_pins(
+ ARRAY_AND_SIZE(mop500_stm_mipi34_pins));
+ if (retval) {
+ pr_err("STM: Failed to enable MIPI34\n");
+ return retval;
+ }
+ }
+
+ switch (con_type) {
+ case STM_DEFAULT_CONNECTION:
+ case STM_STE_MODEM_ON_MIPI34_NONE_ON_MIPI60:
+ /* Enable altC3 on GPIO70-74 (STMMOD) & GPIO75-76 (UARTMOD) */
+ gpiocr |= (PRCM_GPIOCR_DBG_STM_MOD_CMD1
+ | PRCM_GPIOCR_DBG_UARTMOD_CMD0);
+ writel(gpiocr, PRCM_GPIOCR);
+ retval = stm_ste_disable_ape_on_mipi60();
+ break;
+
+ case STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60:
+ /* Disable altC3 on GPIO70-74 (STMMOD) & GPIO75-76 (UARTMOD) */
+ gpiocr &= ~(PRCM_GPIOCR_DBG_STM_MOD_CMD1
+ | PRCM_GPIOCR_DBG_UARTMOD_CMD0);
+ writel(gpiocr, PRCM_GPIOCR);
+ retval = stm_ste_disable_ape_on_mipi60();
+ break;
+
+ case STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60:
+ /* Enable altC3 on GPIO70-74 (STMMOD) and GPIO75-76 (UARTMOD) */
+ gpiocr |= (PRCM_GPIOCR_DBG_STM_MOD_CMD1
+ | PRCM_GPIOCR_DBG_UARTMOD_CMD0);
+ writel(gpiocr, PRCM_GPIOCR);
+
+ /* Enable APE on MIPI60 */
+ retval = nmk_config_pins_sleep(ARRAY_AND_SIZE(mop500_ske_pins));
+ if (retval)
+ pr_err("STM: Failed to disable SKE GPIO\n");
+ else {
+ retval = nmk_config_pins(
+ ARRAY_AND_SIZE(mop500_stm_mipi60_pins));
+ if (retval)
+ pr_err("STM: Failed to enable MIPI60\n");
+ }
+ break;
+
+ case STM_DISCONNECT:
+ retval = nmk_config_pins_sleep(
+ ARRAY_AND_SIZE(mop500_stm_mipi34_pins));
+ if (retval)
+ pr_err("STM: Failed to disable MIPI34\n");
+
+ retval = stm_ste_disable_ape_on_mipi60();
+ break;
+
+ default:
+ pr_err("STM: bad connection type\n");
+ break;
+ }
+ return retval;
+}
+
+/* Possible STM sources (masters) on ux500 */
+enum stm_master {
+ STM_ARM0 = 0,
+ STM_ARM1 = 1,
+ STM_SVA = 2,
+ STM_SIA = 3,
+ STM_SIA_XP70 = 4,
+ STM_PRCMU = 5,
+ STM_MCSBAG = 9
+};
+
+#define STM_ENABLE_ARM0 BIT(STM_ARM0)
+#define STM_ENABLE_ARM1 BIT(STM_ARM1)
+#define STM_ENABLE_SVA BIT(STM_SVA)
+#define STM_ENABLE_SIA BIT(STM_SIA)
+#define STM_ENABLE_SIA_XP70 BIT(STM_SIA_XP70)
+#define STM_ENABLE_PRCMU BIT(STM_PRCMU)
+#define STM_ENABLE_MCSBAG BIT(STM_MCSBAG)
+
+/*
+ * These are the channels used by NMF; some external software expects
+ * the NMF traces to be output on these channels.
+ * For legacy reasons, we need to reserve them.
+ */
+static const s16 stm_channels_reserved[] = {
+ 100, /* NMF MPCEE channel */
+ 101, /* NMF CM channel */
+ 151, /* NMF HOSTEE channel */
+};
+
+/* On Ux500 we have 2 consecutive STMs, so 512 channels are available */
+static struct stm_platform_data stm_pdata = {
+ .regs_phys_base = U8500_STM_REG_BASE,
+ .channels_phys_base = U8500_STM_BASE,
+ .id_mask = 0x000fffff, /* Ignore revisions differences */
+ .channels_reserved = stm_channels_reserved,
+ .channels_reserved_sz = ARRAY_SIZE(stm_channels_reserved),
+ /* Enable all except MCSBAG */
+ .masters_enabled = STM_ENABLE_ARM0 | STM_ENABLE_ARM1 |
+ STM_ENABLE_SVA | STM_ENABLE_PRCMU |
+ STM_ENABLE_SIA | STM_ENABLE_SIA_XP70,
+ /* Provide function for MIPI34/MIPI60 STM connection */
+ .stm_connection = stm_ste_connection,
+};
+
+struct platform_device u8500_stm_device = {
+ .name = "stm",
+ .id = -1,
+ .dev = {
+ .platform_data = &stm_pdata,
+ },
+};
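The stm_connection callback above is what actually routes the trace pins; the stm driver is expected to invoke it through stm_pdata. As a rough, hypothetical illustration (not part of the patch), code living in this same file could select the "modem on MIPI34, APE on MIPI60" routing like this:

static int __init mop500_stm_route_default(void)
{
	/* Hypothetical helper: exercise the routing switch defined above. */
	int ret = stm_ste_connection(STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60);

	if (ret)
		pr_err("STM: could not select MIPI34/MIPI60 routing\n");
	return ret;
}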
diff --git a/arch/arm/mach-ux500/board-mop500-stuib.c b/arch/arm/mach-ux500/board-mop500-stuib.c
index 8c979770d87..3a01b119511 100644
--- a/arch/arm/mach-ux500/board-mop500-stuib.c
+++ b/arch/arm/mach-ux500/board-mop500-stuib.c
@@ -11,33 +11,70 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#ifdef CONFIG_U8500_FLASH
+#include <../drivers/staging/camera_flash/adp1653_plat.h>
+#endif
#include <linux/input/matrix_keypad.h>
#include <asm/mach-types.h>
#include "board-mop500.h"
-/* STMPE/SKE keypad use this key layout */
+/*
+ * ux500 keymaps
+ *
+ * Organized row-wise as on the UIB, starting at the top-left
+ *
+ * We support two key layouts, selected at build time via Kconfig. The first
+ * key layout includes power/volume controls and a few generic keys; the
+ * second key layout contains the full numeric layout plus enter/back/left
+ * buttons along with a "." (dot), specifically for connectivity testing.
+ */
static const unsigned int mop500_keymap[] = {
+#if defined(CONFIG_KEYLAYOUT_LAYOUT1)
KEY(2, 5, KEY_END),
- KEY(4, 1, KEY_POWER),
+ KEY(4, 1, KEY_HOME),
KEY(3, 5, KEY_VOLUMEDOWN),
- KEY(1, 3, KEY_3),
+ KEY(1, 3, KEY_EMAIL),
KEY(5, 2, KEY_RIGHT),
- KEY(5, 0, KEY_9),
+ KEY(5, 0, KEY_BACKSPACE),
KEY(0, 5, KEY_MENU),
KEY(7, 6, KEY_ENTER),
KEY(4, 5, KEY_0),
- KEY(6, 7, KEY_2),
+ KEY(6, 7, KEY_DOT),
KEY(3, 4, KEY_UP),
KEY(3, 3, KEY_DOWN),
KEY(6, 4, KEY_SEND),
KEY(6, 2, KEY_BACK),
KEY(4, 2, KEY_VOLUMEUP),
- KEY(5, 5, KEY_1),
+ KEY(5, 5, KEY_SPACE),
KEY(4, 3, KEY_LEFT),
+ KEY(3, 2, KEY_SEARCH),
+#elif defined(CONFIG_KEYLAYOUT_LAYOUT2)
+ KEY(2, 5, KEY_RIGHT),
+ KEY(4, 1, KEY_ENTER),
+ KEY(3, 5, KEY_MENU),
+ KEY(1, 3, KEY_3),
+ KEY(5, 2, KEY_6),
+ KEY(5, 0, KEY_9),
+
+ KEY(0, 5, KEY_UP),
+ KEY(7, 6, KEY_DOWN),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_2),
+ KEY(3, 4, KEY_5),
+ KEY(3, 3, KEY_8),
+
+ KEY(6, 4, KEY_LEFT),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_KPDOT),
+ KEY(5, 5, KEY_1),
+ KEY(4, 3, KEY_4),
KEY(3, 2, KEY_7),
+#else
+#warning "No keypad layout defined."
+#endif
};
static const struct matrix_keymap_data mop500_keymap_data = {
@@ -73,6 +110,24 @@ static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
},
};
+#ifdef CONFIG_U8500_FLASH
+/*
+ * Config data for the flash
+ */
+static struct adp1653_platform_data __initdata adp1653_pdata_u8500_uib = {
+ .irq_no = CAMERA_FLASH_INT_PIN
+};
+#endif
+
+static struct i2c_board_info __initdata mop500_i2c2_devices_stuib[] = {
+#ifdef CONFIG_U8500_FLASH
+ {
+ I2C_BOARD_INFO("adp1653", 0x30),
+ .platform_data = &adp1653_pdata_u8500_uib
+ }
+#endif
+};
+
/*
* BU21013 ROHM touchscreen interface on the STUIBs
*/
@@ -111,6 +166,7 @@ static int bu21013_gpio_board_init(int reset_pin)
__func__);
return retval;
}
+ gpio_set_value_cansleep(reset_pin, 1);
}
return retval;
@@ -133,7 +189,8 @@ static int bu21013_gpio_board_exit(int reset_pin)
__func__);
return retval;
}
- gpio_set_value(reset_pin, 0);
+ gpio_set_value_cansleep(reset_pin, 0);
+ gpio_free(reset_pin);
}
bu21013_devices--;
@@ -157,9 +214,19 @@ static struct bu21013_platform_device tsc_plat_device = {
.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
- .ext_clk = false,
- .x_flip = false,
- .y_flip = true,
+ .x_max_res = 480,
+ .y_max_res = 864,
+ .portrait = true,
+ .has_ext_clk = true,
+ .enable_ext_clk = false,
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE) && \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE == 270
+ .x_flip = true,
+ .y_flip = false,
+#else
+ .x_flip = false,
+ .y_flip = true,
+#endif
};
static struct bu21013_platform_device tsc_plat2_device = {
@@ -169,18 +236,28 @@ static struct bu21013_platform_device tsc_plat2_device = {
.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
- .ext_clk = false,
- .x_flip = false,
- .y_flip = true,
+ .x_max_res = 480,
+ .y_max_res = 864,
+ .portrait = true,
+ .has_ext_clk = true,
+ .enable_ext_clk = false,
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE) && \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE == 270
+ .x_flip = true,
+ .y_flip = false,
+#else
+ .x_flip = false,
+ .y_flip = true,
+#endif
};
static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
{
- I2C_BOARD_INFO("bu21013_tp", 0x5C),
+ I2C_BOARD_INFO("bu21013_ts", 0x5C),
.platform_data = &tsc_plat_device,
},
{
- I2C_BOARD_INFO("bu21013_tp", 0x5D),
+ I2C_BOARD_INFO("bu21013_ts", 0x5D),
.platform_data = &tsc_plat2_device,
},
@@ -191,15 +268,25 @@ void __init mop500_stuib_init(void)
if (machine_is_hrefv60()) {
tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
tsc_plat2_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
+#ifdef CONFIG_U8500_FLASH
+ adp1653_pdata_u8500_uib.enable_gpio =
+ HREFV60_CAMERA_FLASH_ENABLE;
+#endif
} else {
tsc_plat_device.cs_pin = GPIO_BU21013_CS;
tsc_plat2_device.cs_pin = GPIO_BU21013_CS;
-
+#ifdef CONFIG_U8500_FLASH
+ adp1653_pdata_u8500_uib.enable_gpio =
+ GPIO_CAMERA_FLASH_ENABLE;
+#endif
}
mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
ARRAY_SIZE(mop500_i2c0_devices_stuib));
+ mop500_uib_i2c_add(2, mop500_i2c2_devices_stuib,
+ ARRAY_SIZE(mop500_i2c2_devices_stuib));
+
mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib,
ARRAY_SIZE(u8500_i2c3_devices_stuib));
}
diff --git a/arch/arm/mach-ux500/board-mop500-u8500uib.c b/arch/arm/mach-ux500/board-mop500-u8500uib.c
index feb5744d98b..20763004c4a 100644
--- a/arch/arm/mach-ux500/board-mop500-u8500uib.c
+++ b/arch/arm/mach-ux500/board-mop500-u8500uib.c
@@ -8,11 +8,22 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#ifdef CONFIG_U8500_FLASH
+#include <../drivers/staging/camera_flash/adp1653_plat.h>
+#endif
#include <linux/gpio.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_SENSORS_LSM303DLH
+#include <linux/lsm303dlh.h>
+#endif
+#ifdef CONFIG_SENSORS_L3G4200D
+#include <linux/l3g4200d.h>
+#endif
#include <linux/mfd/tc3589x.h>
#include <linux/input/matrix_keypad.h>
+#include <asm/mach-types.h>
+#include <linux/gpio.h>
#include <mach/irqs.h>
#include "board-mop500.h"
@@ -21,12 +32,75 @@
struct i2c_board_info __initdata __weak mop500_i2c3_devices_u8500[] = {
};
+#ifdef CONFIG_SENSORS_LSM303DLH
+/*
+ * LSM303DLH accelerometer + magnetometer & L3G4200D Gyroscope sensors
+ */
+static struct lsm303dlh_platform_data __initdata lsm303dlh_pdata_u8500 = {
+ .name_a = "lsm303dlh.0",
+ .name_m = "lsm303dlh.1",
+ .axis_map_x = 1,
+ .axis_map_y = 0,
+ .axis_map_z = 2,
+ .negative_x = 1,
+ .negative_y = 1,
+ .negative_z = 1,
+};
+#endif
+
+#ifdef CONFIG_SENSORS_L3G4200D
+static struct l3g4200d_gyr_platform_data __initdata l3g4200d_pdata_u8500 = {
+ .name_gyr = "l3g4200d",
+ .axis_map_x = 1,
+ .axis_map_y = 0,
+ .axis_map_z = 2,
+ .negative_x = 0,
+ .negative_y = 0,
+ .negative_z = 1,
+};
+#endif
+
+#ifdef CONFIG_U8500_FLASH
+static struct adp1653_platform_data __initdata adp1653_pdata_u8500_uib = {
+ .irq_no = CAMERA_FLASH_INT_PIN
+};
+#endif
+
+static struct i2c_board_info __initdata mop500_i2c2_devices_u8500[] = {
+#ifdef CONFIG_SENSORS_LSM303DLH
+ {
+ /* LSM303DLH Accelerometer */
+ I2C_BOARD_INFO("lsm303dlh_a", 0x18),
+ .platform_data = &lsm303dlh_pdata_u8500,
+ },
+ {
+ /* LSM303DLH Magnetometer */
+ I2C_BOARD_INFO("lsm303dlh_m", 0x1E),
+ .platform_data = &lsm303dlh_pdata_u8500,
+ },
+#endif
+#ifdef CONFIG_SENSORS_L3G4200D
+ {
+ /* L3G4200D Gyroscope */
+ I2C_BOARD_INFO("l3g4200d", 0x68),
+ .platform_data = &l3g4200d_pdata_u8500,
+ },
+#endif
+#ifdef CONFIG_U8500_FLASH
+ {
+ I2C_BOARD_INFO("adp1653", 0x30),
+ .platform_data = &adp1653_pdata_u8500_uib
+ }
+#endif
+};
+
+
/*
* TC35893
*/
static const unsigned int u8500_keymap[] = {
KEY(3, 1, KEY_END),
- KEY(4, 1, KEY_POWER),
+ KEY(4, 1, KEY_HOME),
KEY(6, 4, KEY_VOLUMEDOWN),
KEY(4, 2, KEY_EMAIL),
KEY(3, 3, KEY_RIGHT),
@@ -87,4 +161,28 @@ void __init mop500_u8500uib_init(void)
mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
ARRAY_SIZE(mop500_i2c0_devices_u8500));
+ if (machine_is_hrefv60()) {
+#ifdef CONFIG_SENSORS_LSM303DLH
+ lsm303dlh_pdata_u8500.irq_a1 = HREFV60_ACCEL_INT1_GPIO;
+ lsm303dlh_pdata_u8500.irq_a2 = HREFV60_ACCEL_INT2_GPIO;
+ lsm303dlh_pdata_u8500.irq_m = HREFV60_MAGNET_DRDY_GPIO;
+#endif
+#ifdef CONFIG_U8500_FLASH
+ adp1653_pdata_u8500_uib.enable_gpio =
+ HREFV60_CAMERA_FLASH_ENABLE;
+#endif
+ } else {
+#ifdef CONFIG_SENSORS_LSM303DLH
+ lsm303dlh_pdata_u8500.irq_a1 = GPIO_ACCEL_INT1;
+ lsm303dlh_pdata_u8500.irq_a2 = GPIO_ACCEL_INT2;
+ lsm303dlh_pdata_u8500.irq_m = GPIO_MAGNET_DRDY;
+#endif
+#ifdef CONFIG_U8500_FLASH
+ adp1653_pdata_u8500_uib.enable_gpio =
+ GPIO_CAMERA_FLASH_ENABLE;
+#endif
+ }
+
+ mop500_uib_i2c_add(2, mop500_i2c2_devices_u8500,
+ ARRAY_SIZE(mop500_i2c2_devices_u8500));
}
diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c
index 5af36aa56c0..8679b15643c 100644
--- a/arch/arm/mach-ux500/board-mop500-uib.c
+++ b/arch/arm/mach-ux500/board-mop500-uib.c
@@ -1,4 +1,5 @@
/*
+
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
@@ -10,13 +11,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
-
#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
#include "board-mop500.h"
enum mop500_uib {
STUIB,
U8500UIB,
+ U8500UIB_R3,
+ NO_UIB,
};
struct uib {
@@ -25,6 +29,8 @@ struct uib {
void (*init)(void);
};
+static u8 type_of_uib = NO_UIB;
+
static struct uib __initdata mop500_uibs[] = {
[STUIB] = {
.name = "ST-UIB",
@@ -36,9 +42,16 @@ static struct uib __initdata mop500_uibs[] = {
.option = "u8500uib",
.init = mop500_u8500uib_init,
},
+#ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+ [U8500UIB_R3] = {
+ .name = "U8500-UIBR3",
+ .option = "u8500uibr3",
+ .init = mop500_u8500uib_r3_init,
+ },
+#endif
};
-static struct uib *mop500_uib;
+static struct uib __initdata *mop500_uib;
static int __init mop500_uib_setup(char *str)
{
@@ -64,7 +77,7 @@ __setup("uib=", mop500_uib_setup);
* The UIBs are detected after the I2C host controllers are registered, so
* i2c_register_board_info() can't be used.
*/
-void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+void mop500_uib_i2c_add(int busnum, struct i2c_board_info const *info,
unsigned n)
{
struct i2c_adapter *adap;
@@ -90,26 +103,46 @@ void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
static void __init __mop500_uib_init(struct uib *uib, const char *why)
{
pr_info("%s (%s)\n", uib->name, why);
+
+ if (strcmp("stuib", uib->option) == 0)
+ type_of_uib = STUIB;
+ else if (strcmp("u8500uib", uib->option) == 0)
+ type_of_uib = U8500UIB;
+ else if (strcmp("u8500uibr3", uib->option) == 0)
+ type_of_uib = U8500UIB_R3;
+
uib->init();
}
+int uib_is_stuib(void)
+{
+ return (type_of_uib == STUIB);
+}
+
+int uib_is_u8500uib(void)
+{
+ return (type_of_uib == U8500UIB);
+}
+
+int uib_is_u8500uibr3(void)
+{
+ return (type_of_uib == U8500UIB_R3);
+}
+
/*
* Detect the UIB attached based on the presence or absence of i2c devices.
*/
static int __init mop500_uib_init(void)
{
- struct uib *uib = mop500_uib;
+ struct uib *uib = mop500_uibs;
struct i2c_adapter *i2c0;
+ struct i2c_adapter *i2c3;
int ret;
- if (!cpu_is_u8500())
+ /* Snowball and non-U8500 CPUs don't have a UIB */
+ if (!cpu_is_u8500() || machine_is_snowball())
return -ENODEV;
- if (uib) {
- __mop500_uib_init(uib, "from uib= boot argument");
- return 0;
- }
-
i2c0 = i2c_get_adapter(0);
if (!i2c0) {
__mop500_uib_init(&mop500_uibs[STUIB],
@@ -121,12 +154,28 @@ static int __init mop500_uib_init(void)
ret = i2c_smbus_xfer(i2c0, 0x44, 0, I2C_SMBUS_WRITE, 0,
I2C_SMBUS_QUICK, NULL);
i2c_put_adapter(i2c0);
-
- if (ret == 0)
- uib = &mop500_uibs[U8500UIB];
- else
- uib = &mop500_uibs[STUIB];
-
+ i2c3 = i2c_get_adapter(3);
+ if (ret == 0) {
+ if (!i2c3) {
+ __mop500_uib_init(&mop500_uibs[STUIB],
+ "fallback, could not get i2c3");
+ return -ENODEV;
+ }
+ ret = i2c_smbus_xfer(i2c3, 0x4B, 0, I2C_SMBUS_WRITE, 0,
+ I2C_SMBUS_QUICK, NULL);
+ i2c_put_adapter(i2c3);
+ if (ret == 0)
+ uib = &mop500_uibs[U8500UIB];
+ else
+ uib = &mop500_uibs[U8500UIB_R3];
+ }
+ else {
+ ret = i2c_smbus_xfer(i2c3, 0x5C, 0, I2C_SMBUS_WRITE, 0,
+ I2C_SMBUS_QUICK, NULL);
+ i2c_put_adapter(i2c3);
+ if (ret == 0)
+ uib = &mop500_uibs[STUIB];
+ }
__mop500_uib_init(uib, "detected");
return 0;
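The detection above boils down to SMBus "quick write" probes: a device that ACKs its address is considered present. A minimal sketch of that probe pattern, using a hypothetical helper name, could look like:

/* Hypothetical helper, not in the patch: 0 means a device ACKed addr. */
static int __init mop500_uib_probe_addr(int busnum, u16 addr)
{
	struct i2c_adapter *adap = i2c_get_adapter(busnum);
	int ret;

	if (!adap)
		return -ENODEV;

	/* A quick write transfers no data; it only checks for an ACK. */
	ret = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
			     I2C_SMBUS_QUICK, NULL);
	i2c_put_adapter(adap);

	return ret;
}

With such a helper, the logic above reads: 0x44 answering on i2c0 plus 0x4B on i2c3 means U8500-UIB, 0x44 alone means U8500-UIB R3, and 0x5C on i2c3 means ST-UIB.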
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index bdd7b80dd7a..ca1677e8b3e 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -15,40 +15,80 @@
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/amba/serial.h>
#include <linux/spi/spi.h>
+#ifdef CONFIG_HSI
+#include <linux/hsi/hsi.h>
+#endif
#include <linux/mfd/ab8500.h>
#include <linux/regulator/ab8500.h>
#include <linux/mfd/tc3589x.h>
-#include <linux/mfd/tps6105x.h>
#include <linux/mfd/ab8500/gpio.h>
+#include <linux/regulator/fixed.h>
#include <linux/leds-lp5521.h>
#include <linux/input.h>
#include <linux/smsc911x.h>
#include <linux/gpio_keys.h>
#include <linux/delay.h>
-
+#include <linux/mfd/ab8500/denc.h>
+#ifdef CONFIG_STM_MSP_I2S
+#include <linux/spi/stm_msp.h>
+#endif
+#include <linux/leds_pwm.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio/nomadik.h>
#include <linux/leds.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <plat/i2c.h>
#include <plat/ste_dma40.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <mach/sensors1p.h>
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+#include <mach/abx500-accdet.h>
+#endif
#include <mach/irqs.h>
+#include <mach/ste-dma40-db8500.h>
+#ifdef CONFIG_U8500_SIM_DETECT
+#include <mach/sim_detect.h>
+#endif
+#ifdef CONFIG_CRYPTO_DEV_UX500
+#include <mach/crypto-ux500.h>
+#endif
+#ifdef CONFIG_AV8100
+#include <video/av8100.h>
+#endif
+
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+#include <plat/ske.h>
+#include "pins.h"
+#endif
#include "pins-db8500.h"
-#include "ste-dma40-db8500.h"
#include "devices-db8500.h"
#include "board-mop500.h"
#include "board-mop500-regulators.h"
+#include "board-mop500-bm.h"
+#include "board-ux500-usb.h"
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+#include "board-mop500-wlan.h"
+#endif
+
+#ifdef CONFIG_AB8500_DENC
+static struct ab8500_denc_platform_data ab8500_denc_pdata = {
+ .ddr_enable = true,
+ .ddr_little_endian = false,
+};
+#endif
static struct gpio_led snowball_led_array[] = {
{
@@ -71,7 +111,7 @@ static struct platform_device snowball_led_dev = {
};
static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
- .gpio_base = MOP500_AB8500_GPIO(0),
+ .gpio_base = AB8500_PIN_GPIO1,
.irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE,
/* config_reg is the initial configuration of ab8500 pins.
* The pins can be configured as GPIO or alt functions based
@@ -79,18 +119,43 @@ static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
* register. This is the array of 7 configuration settings.
* One has to compile time decide these settings. Below is the
* explanation of these setting
- * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
- * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
- * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
- * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
- * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
- * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
+ * GpioSel1 = 0x0F => Pin GPIO1 (SysClkReq2)
+ * Pin GPIO2 (SysClkReq3)
+ * Pin GPIO3 (SysClkReq4)
+ * Pin GPIO4 (SysClkReq6) are configured as GPIO
+ * GpioSel2 = 0x9E => Pins GPIO10 to GPIO13 are configured as GPIO
+ * GpioSel3 = 0x80 => Pin GPIO24 (SysClkReq7) is configured as GPIO
+ * GpioSel4 = 0x01 => Pin GPIO25 (SysClkReq8) is configured as GPIO
+ * GpioSel5 = 0x78 => Pin GPIO36 (ApeSpiClk)
+ * Pin GPIO37 (ApeSpiCSn)
+ * Pin GPIO38 (ApeSpiDout)
+ * Pin GPIO39 (ApeSpiDin) are configured as GPIO
+ * GpioSel6 = 0x02 => Pin GPIO42 (SysClkReq5) is configured as GPIO
* AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured
- * as GPIO then this register selectes the alternate fucntions
+ * as GPIO then this register selects the alternate functions
+ */
+ .config_reg = {0x0F, 0x9E, 0x80, 0x01, 0x78, 0x02, 0x00},
+
+ /* config_direction allows for the initial GPIO direction to
+ * be set. For Snowball we set GPIO26 to output.
*/
- .config_reg = {0x00, 0x1E, 0x80, 0x01,
- 0x7A, 0x00, 0x00},
+ .config_direction = {0x00, 0x00, 0x00, 0x02, 0x00, 0x00},
+
+ /*
+ * config_pullups allows for the initial configuration of the
+ * GPIO pullup/pulldown configuration.
+ */
+ .config_pullups = {0xE0, 0x01, 0x00, 0x00, 0x00, 0x00},
+};
+
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+static struct abx500_accdet_platform_data ab8500_accdet_pdata = {
+ .btn_keycode = KEY_MEDIA,
+ .accdet1_dbth = ACCDET1_TH_1200mV | ACCDET1_DB_70ms,
+ .accdet2122_th = ACCDET21_TH_1000mV | ACCDET22_TH_1000mV,
+ .video_ctrl_gpio = AB8500_PIN_GPIO35,
};
+#endif
static struct gpio_keys_button snowball_key_array[] = {
{
@@ -183,13 +248,36 @@ static struct platform_device snowball_sbnet_dev = {
},
};
+#ifdef CONFIG_MODEM_U8500
+static struct platform_device u8500_modem_dev = {
+ .name = "u8500-modem",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+#endif
+
static struct ab8500_platform_data ab8500_platdata = {
.irq_base = MOP500_AB8500_IRQ_BASE,
- .regulator_reg_init = ab8500_regulator_reg_init,
- .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
- .regulator = ab8500_regulators,
- .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .regulator = &ab8500_regulator_plat_data,
+#ifdef CONFIG_AB8500_DENC
+ .denc = &ab8500_denc_pdata,
+#endif
+ .battery = &ab8500_bm_data,
+ .charger = &ab8500_charger_plat_data,
+ .btemp = &ab8500_btemp_plat_data,
+ .fg = &ab8500_fg_plat_data,
+ .chargalg = &ab8500_chargalg_plat_data,
.gpio = &ab8500_gpio_pdata,
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ .accdet = &ab8500_accdet_pdata,
+#endif
+#ifdef CONFIG_PM
+ .pm_power_off = true,
+#endif
+ .thermal_time_out = 20, /* seconds */
+ .usb = &abx500_usbgpio_plat_data,
};
static struct resource ab8500_resources[] = {
@@ -210,16 +298,200 @@ struct platform_device ab8500_device = {
.resource = ab8500_resources,
};
+
+
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+
+/*
+ * Nomadik SKE keypad
+ */
+#define ROW_PIN_I0 164
+#define ROW_PIN_I1 163
+#define ROW_PIN_I2 162
+#define ROW_PIN_I3 161
+#define ROW_PIN_I4 156
+#define ROW_PIN_I5 155
+#define ROW_PIN_I6 154
+#define ROW_PIN_I7 153
+#define COL_PIN_O0 168
+#define COL_PIN_O1 167
+#define COL_PIN_O2 166
+#define COL_PIN_O3 165
+#define COL_PIN_O4 160
+#define COL_PIN_O5 159
+#define COL_PIN_O6 158
+#define COL_PIN_O7 157
+
+static int ske_kp_rows[] = {
+ ROW_PIN_I0, ROW_PIN_I1, ROW_PIN_I2, ROW_PIN_I3,
+ ROW_PIN_I4, ROW_PIN_I5, ROW_PIN_I6, ROW_PIN_I7,
+};
+static int ske_kp_cols[] = {
+ COL_PIN_O0, COL_PIN_O1, COL_PIN_O2, COL_PIN_O3,
+ COL_PIN_O4, COL_PIN_O5, COL_PIN_O6, COL_PIN_O7,
+};
+
+static bool ske_config;
/*
- * TPS61052
+ * ske_set_gpio_row: request and set gpio rows
*/
+static int ske_set_gpio_row(int gpio)
+{
+ int ret;
-static struct tps6105x_platform_data mop500_tps61052_data = {
- .mode = TPS6105X_MODE_VOLTAGE,
- .regulator_data = &tps61052_regulator,
+ if (!ske_config) {
+ ret = gpio_request(gpio, "ske-kp");
+ if (ret < 0) {
+ pr_err("ske_set_gpio_row: gpio request failed\n");
+ return ret;
+ }
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret < 0) {
+ pr_err("ske_set_gpio_row: gpio direction failed\n");
+ gpio_free(gpio);
+ }
+
+ return ret;
+}
+
+/*
+ * ske_kp_init - enable the gpio configuration
+ */
+static int ske_kp_init(void)
+{
+ struct ux500_pins *pins;
+ int ret, i;
+
+ pins = ux500_pins_get("ske");
+ if (pins)
+ ux500_pins_enable(pins);
+
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ ret = ske_set_gpio_row(ske_kp_rows[i]);
+ if (ret < 0) {
+ pr_err("ske_kp_init: failed init\n");
+ return ret;
+ }
+ }
+ if (!ske_config)
+ ske_config = true;
+
+ return 0;
+}
+
+static int ske_kp_exit(void)
+{
+ struct ux500_pins *pins;
+
+ pins = ux500_pins_get("ske");
+ if (pins)
+ ux500_pins_disable(pins);
+
+ return 0;
+}
+
+
+static const unsigned int mop500_ske_keymap[] = {
+#if defined(CONFIG_KEYLAYOUT_LAYOUT1)
+ KEY(2, 5, KEY_END),
+ KEY(4, 1, KEY_HOME),
+ KEY(3, 5, KEY_VOLUMEDOWN),
+ KEY(1, 3, KEY_EMAIL),
+ KEY(5, 2, KEY_RIGHT),
+ KEY(5, 0, KEY_BACKSPACE),
+
+ KEY(0, 5, KEY_MENU),
+ KEY(7, 6, KEY_ENTER),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_DOT),
+ KEY(3, 4, KEY_UP),
+ KEY(3, 3, KEY_DOWN),
+
+ KEY(6, 4, KEY_SEND),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_VOLUMEUP),
+ KEY(5, 5, KEY_SPACE),
+ KEY(4, 3, KEY_LEFT),
+ KEY(3, 2, KEY_SEARCH),
+#elif defined(CONFIG_KEYLAYOUT_LAYOUT2)
+ KEY(2, 5, KEY_RIGHT),
+ KEY(4, 1, KEY_ENTER),
+ KEY(3, 5, KEY_MENU),
+ KEY(1, 3, KEY_3),
+ KEY(5, 2, KEY_6),
+ KEY(5, 0, KEY_9),
+
+ KEY(0, 5, KEY_UP),
+ KEY(7, 6, KEY_DOWN),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_2),
+ KEY(3, 4, KEY_5),
+ KEY(3, 3, KEY_8),
+
+ KEY(6, 4, KEY_LEFT),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_KPDOT),
+ KEY(5, 5, KEY_1),
+ KEY(4, 3, KEY_4),
+ KEY(3, 2, KEY_7),
+#else
+#warning "No keypad layout defined."
+#endif
+};
+
+static struct matrix_keymap_data mop500_ske_keymap_data = {
+ .keymap = mop500_ske_keymap,
+ .keymap_size = ARRAY_SIZE(mop500_ske_keymap),
+};
+
+
+
+static struct ske_keypad_platform_data mop500_ske_keypad_data = {
+ .init = ske_kp_init,
+ .exit = ske_kp_exit,
+ .gpio_input_pins = ske_kp_rows,
+ .gpio_output_pins = ske_kp_cols,
+ .keymap_data = &mop500_ske_keymap_data,
+ .no_autorepeat = true,
+ .krow = SKE_KPD_MAX_ROWS, /* 8x8 matrix */
+ .kcol = SKE_KPD_MAX_COLS,
+ .debounce_ms = 20, /* in timeout period */
+ .switch_delay = 200, /* in jiffies */
+};
+
+#endif
+
+
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+/*
+ * GPIO-regulator wlan vbat data
+ */
+static struct fixed_voltage_config snowball_gpio_wlan_vbat_data = {
+ .supply_name = "WLAN-VBAT",
+ .gpio = SNOWBALL_EN_3V6_GPIO,
+ .microvolts = 3600000,
+ .enable_high = 1,
+ .init_data = &gpio_wlan_vbat_regulator,
+ .startup_delay = 3500, /* Startup time */
};
/*
+ * GPIO-regulator en 3v3 vbat data
+ */
+
+static struct fixed_voltage_config snowball_gpio_en_3v3_data = {
+ .supply_name = "EN-3V3",
+ .gpio = SNOWBALL_EN_3V3_ETH_GPIO,
+ .microvolts = 3300000,
+ .enable_high = 1,
+ .init_data = &gpio_en_3v3_regulator,
+ .startup_delay = 5000, /* 1200us according to data sheet */
+};
+#endif
+
+/*
* TC35892
*/
@@ -240,53 +512,64 @@ static struct tc3589x_platform_data mop500_tc35892_data = {
};
static struct lp5521_led_config lp5521_pri_led[] = {
- [0] = {
- .chan_nr = 0,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [1] = {
- .chan_nr = 1,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [2] = {
- .chan_nr = 2,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
+ [0] = {
+ .chan_nr = 0,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [1] = {
+ .chan_nr = 1,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [2] = {
+ .chan_nr = 2,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
};
+#ifdef CONFIG_AV8100
+static struct av8100_platform_data av8100_plat_data = {
+ .irq = NOMADIK_GPIO_TO_IRQ(192),
+ .reset = MOP500_HDMI_RST_GPIO,
+ .inputclk_id = "sysclk2",
+ .regulator_pwr_id = "hdmi_1v8",
+ .alt_powerupseq = true,
+ .mclk_freq = 3, /* MCLK_RNG_31_38 */
+};
+#endif
+
static struct lp5521_platform_data __initdata lp5521_pri_data = {
- .label = "lp5521_pri",
- .led_config = &lp5521_pri_led[0],
- .num_channels = 3,
- .clock_mode = LP5521_CLOCK_EXT,
+ .label = "lp5521_pri",
+ .led_config = &lp5521_pri_led[0],
+ .num_channels = 3,
+ .clock_mode = LP5521_CLOCK_EXT,
};
static struct lp5521_led_config lp5521_sec_led[] = {
- [0] = {
- .chan_nr = 0,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [1] = {
- .chan_nr = 1,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [2] = {
- .chan_nr = 2,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
+ [0] = {
+ .chan_nr = 0,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [1] = {
+ .chan_nr = 1,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [2] = {
+ .chan_nr = 2,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
};
static struct lp5521_platform_data __initdata lp5521_sec_data = {
- .label = "lp5521_sec",
- .led_config = &lp5521_sec_led[0],
- .num_channels = 3,
- .clock_mode = LP5521_CLOCK_EXT,
+ .label = "lp5521_sec",
+ .led_config = &lp5521_sec_led[0],
+ .num_channels = 3,
+ .clock_mode = LP5521_CLOCK_EXT,
};
static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
@@ -295,15 +578,26 @@ static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
.irq = NOMADIK_GPIO_TO_IRQ(217),
.platform_data = &mop500_tc35892_data,
},
- /* I2C0 devices only available prior to HREFv60 */
+#ifdef CONFIG_AV8100
{
- I2C_BOARD_INFO("tps61052", 0x33),
- .platform_data = &mop500_tps61052_data,
+ I2C_BOARD_INFO("av8100", 0x70),
+ .platform_data = &av8100_plat_data,
},
+#endif
+ /* I2C0 devices only available prior to HREFv60 */
};
#define NUM_PRE_V60_I2C0_DEVICES 1
+static struct i2c_board_info __initdata snowball_i2c0_devices[] = {
+#ifdef CONFIG_AV8100
+ {
+ I2C_BOARD_INFO("av8100", 0x70),
+ .platform_data = &av8100_plat_data,
+ },
+#endif
+};
+
static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
{
/* lp5521 LED driver, 1st device */
@@ -344,13 +638,13 @@ static struct nmk_i2c_controller u8500_i2c##id##_data = { \
/*
* The board uses 4 i2c controllers, initialize all of
* them with slave data setup time of 250 ns,
- * Tx & Rx FIFO threshold values as 8 and standard
+ * Tx FIFO threshold of 1, Rx FIFO threshold of 8, and fast
* mode of operation
*/
-U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
static void __init mop500_i2c_init(void)
{
@@ -367,18 +661,25 @@ static struct gpio_keys_button mop500_gpio_keys[] = {
.code = SW_FRONT_PROXIMITY,
.active_low = 0,
.can_disable = 1,
+ },
+ {
+ .desc = "HED54XXU11 Hall Effect Sensor",
+ .type = EV_SW,
+ .code = SW_LID, /* FIXME arbitrary usage */
+ .active_low = 0,
+ .can_disable = 1,
}
};
-static struct regulator *prox_regulator;
-static int mop500_prox_activate(struct device *dev);
-static void mop500_prox_deactivate(struct device *dev);
+static struct regulator *sensors1p_regulator;
+static int mop500_sensors1p_activate(struct device *dev);
+static void mop500_sensors1p_deactivate(struct device *dev);
static struct gpio_keys_platform_data mop500_gpio_keys_data = {
.buttons = mop500_gpio_keys,
.nbuttons = ARRAY_SIZE(mop500_gpio_keys),
- .enable = mop500_prox_activate,
- .disable = mop500_prox_deactivate,
+ .enable = mop500_sensors1p_activate,
+ .disable = mop500_sensors1p_deactivate,
};
static struct platform_device mop500_gpio_keys_device = {
@@ -389,31 +690,312 @@ static struct platform_device mop500_gpio_keys_device = {
},
};
-static int mop500_prox_activate(struct device *dev)
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+static struct platform_device snowball_gpio_wlan_vbat_regulator_device = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &snowball_gpio_wlan_vbat_data,
+ },
+};
+
+static struct platform_device snowball_gpio_en_3v3_regulator_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &snowball_gpio_en_3v3_data,
+ },
+};
+#endif
+
+static int mop500_sensors1p_activate(struct device *dev)
{
- prox_regulator = regulator_get(&mop500_gpio_keys_device.dev,
+ sensors1p_regulator = regulator_get(&mop500_gpio_keys_device.dev,
"vcc");
- if (IS_ERR(prox_regulator)) {
- dev_err(&mop500_gpio_keys_device.dev,
- "no regulator\n");
- return PTR_ERR(prox_regulator);
+ if (IS_ERR(sensors1p_regulator)) {
+ dev_err(&mop500_gpio_keys_device.dev, "no regulator\n");
+ return PTR_ERR(sensors1p_regulator);
}
- regulator_enable(prox_regulator);
+ regulator_enable(sensors1p_regulator);
return 0;
}
-static void mop500_prox_deactivate(struct device *dev)
+static void mop500_sensors1p_deactivate(struct device *dev)
{
- regulator_disable(prox_regulator);
- regulator_put(prox_regulator);
+ regulator_disable(sensors1p_regulator);
+ regulator_put(sensors1p_regulator);
}
+#ifdef CONFIG_LEDS_PWM
+static struct led_pwm pwm_leds_data[] = {
+ [0] = {
+ .name = "lcd-backlight",
+ .pwm_id = 1,
+ .max_brightness = 255,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_SECONDARY
+ [1] = {
+ .name = "sec-lcd-backlight",
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+#endif
+};
+
+static struct led_pwm_platform_data u8500_leds_data = {
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_SECONDARY
+ .num_leds = 2,
+#else
+ .num_leds = 1,
+#endif
+ .leds = pwm_leds_data,
+};
+
+static struct platform_device ux500_leds_device = {
+ .name = "leds_pwm",
+ .dev = {
+ .platform_data = &u8500_leds_data,
+ },
+};
+#endif
+
+#ifdef CONFIG_BACKLIGHT_PWM
+static struct platform_pwm_backlight_data u8500_backlight_data[] = {
+ [0] = {
+ .pwm_id = 1,
+ .max_brightness = 255,
+ .dft_brightness = 200,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+ [1] = {
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .dft_brightness = 200,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+};
+
+static struct platform_device ux500_backlight_device[] = {
+ [0] = {
+ .name = "pwm-backlight",
+ .id = 0,
+ .dev = {
+ .platform_data = &u8500_backlight_data[0],
+ },
+ },
+ [1] = {
+ .name = "pwm-backlight",
+ .id = 1,
+ .dev = {
+ .platform_data = &u8500_backlight_data[1],
+ },
+ },
+};
+#endif
+
+/* Force feedback vibrator device */
+static struct platform_device ste_ff_vibra_device = {
+ .name = "ste_ff_vibra"
+};
+
+#ifdef CONFIG_HSI
+static struct hsi_board_info __initdata u8500_hsi_devices[] = {
+ {
+ .name = "hsi_char",
+ .hsi_id = 0,
+ .port = 0,
+ .tx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 1,
+ .speed = 200000,
+ {.arb_mode = HSI_ARB_RR},
+ },
+ .rx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 1,
+ .speed = 200000,
+ {.flow = HSI_FLOW_SYNC},
+ },
+ },
+ {
+ .name = "hsi_test",
+ .hsi_id = 0,
+ .port = 0,
+ .tx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 2,
+ .speed = 100000,
+ {.arb_mode = HSI_ARB_RR},
+ },
+ .rx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 2,
+ .speed = 200000,
+ {.flow = HSI_FLOW_SYNC},
+ },
+ },
+ {
+ .name = "cfhsi_v3_driver",
+ .hsi_id = 0,
+ .port = 0,
+ .tx_cfg = {
+ .mode = HSI_MODE_STREAM,
+ .channels = 2,
+ .speed = 20000,
+ {.arb_mode = HSI_ARB_RR},
+ },
+ .rx_cfg = {
+ .mode = HSI_MODE_STREAM,
+ .channels = 2,
+ .speed = 200000,
+ {.flow = HSI_FLOW_SYNC},
+ },
+ },
+};
+#endif
+
+#ifdef CONFIG_U8500_SIM_DETECT
+static struct sim_detect_platform_data sim_detect_pdata = {
+ .irq_num = MOP500_AB8500_VIR_GPIO_IRQ(6),
+};
+struct platform_device u8500_sim_detect_device = {
+ .name = "sim-detect",
+ .id = 0,
+ .dev = {
+ .platform_data = &sim_detect_pdata,
+ },
+};
+#endif
+
+#ifdef CONFIG_SENSORS1P_MOP
+static struct sensors1p_config sensors1p_config = {
+ /* SFH7741 */
+ .proximity = {
+ .pin = EGPIO_PIN_7,
+ .startup_time = 120, /* ms */
+ .regulator = "v-proximity",
+ },
+ /* HED54XXU11 */
+ .hal = {
+ .pin = EGPIO_PIN_8,
+ .startup_time = 100, /* Actually, I have no clue. */
+ .regulator = "v-hal",
+ },
+};
+
+struct platform_device sensors1p_device = {
+ .name = "sensors1p",
+ .dev = {
+ .platform_data = (void *)&sensors1p_config,
+ },
+};
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_UX500
+static struct cryp_platform_data u8500_cryp1_platform_data = {
+ .mem_to_engine = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV48_CAC1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ },
+ .engine_to_mem = {
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV48_CAC1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ }
+};
+
+static struct hash_platform_data u8500_hash1_platform_data = {
+ .mem_to_engine = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV50_HAC1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_16,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_16,
+ },
+};
+#endif
+
/* add any platform devices here - TODO */
static struct platform_device *mop500_platform_devs[] __initdata = {
+#ifdef CONFIG_SENSORS1P_MOP
+ &sensors1p_device,
+#endif
+#ifdef CONFIG_U8500_SIM_DETECT
+ &u8500_sim_detect_device,
+#endif
+ &u8500_shrm_device,
+ &ste_ff_vibra_device,
+#ifdef CONFIG_U8500_MMIO
+ &ux500_mmio_device,
+#endif
+ &ux500_hwmem_device,
+#ifdef CONFIG_FB_MCDE
+ &u8500_mcde_device,
+#endif
+ &u8500_b2r2_device,
+ &u8500_thsens_device,
+#ifdef CONFIG_STE_TRACE_MODEM
+ &u8500_trace_modem,
+#endif
&mop500_gpio_keys_device,
- &ab8500_device,
+#ifdef CONFIG_LEDS_PWM
+ &ux500_leds_device,
+#endif
+#ifdef CONFIG_BACKLIGHT_PWM
+ &ux500_backlight_device[0],
+ &ux500_backlight_device[1],
+#endif
+#ifdef CONFIG_DB8500_MLOADER
+ &mloader_fw_device,
+#endif
+#ifdef CONFIG_HSI
+ &u8500_hsi_device,
+#endif
+#ifdef CONFIG_MODEM_U8500
+ &u8500_modem_dev,
+#endif
+};
+
+#ifdef CONFIG_STM_MSP_I2S
+/*
+ * MSP-SPI
+ */
+
+#define NUM_MSP_CLIENTS 10
+
+static struct stm_msp_controller mop500_msp2_spi_data = {
+ .id = 2,
+ .num_chipselect = NUM_MSP_CLIENTS,
+ .base_addr = U8500_MSP2_BASE,
+ .device_name = "msp2",
};
+/*
+ * SSP
+ */
+
+#define NUM_SSP_CLIENTS 10
+
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg ssp0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
@@ -435,27 +1017,33 @@ static struct stedma40_chan_cfg ssp0_dma_cfg_tx = {
#endif
static struct pl022_ssp_controller ssp0_platform_data = {
- .bus_id = 0,
+ .bus_id = 4,
#ifdef CONFIG_STE_DMA40
.enable_dma = 1,
.dma_filter = stedma40_filter,
.dma_rx_param = &ssp0_dma_cfg_rx,
.dma_tx_param = &ssp0_dma_cfg_tx,
-#else
- .enable_dma = 0,
#endif
/* on this platform, gpio 31,142,144,214 &
* 224 are connected as chip selects
*/
- .num_chipselect = 5,
+ .num_chipselect = NUM_SSP_CLIENTS,
};
+
static void __init mop500_spi_init(void)
{
db8500_add_ssp0(&ssp0_platform_data);
+ if (!machine_is_snowball())
+ db8500_add_msp2_spi(&mop500_msp2_spi_data);
+}
+#else
+static void __init mop500_spi_init(void)
+{
}
+#endif /* CONFIG_STM_MSP_I2S */
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
static struct stedma40_chan_cfg uart0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
@@ -521,6 +1109,7 @@ static pin_cfg_t mop500_pins_uart0[] = {
#define PRCC_K_SOFTRST_SET 0x18
#define PRCC_K_SOFTRST_CLEAR 0x1C
+/* pl011 reset */
static void ux500_uart0_reset(void)
{
void __iomem *prcc_rst_set, *prcc_rst_clr;
@@ -559,8 +1148,10 @@ static void ux500_uart0_exit(void)
pr_err("pl011: uart pins_disable failed\n");
}
+
+
static struct amba_pl011_data uart0_plat = {
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
.dma_filter = stedma40_filter,
.dma_rx_param = &uart0_dma_cfg_rx,
.dma_tx_param = &uart0_dma_cfg_tx,
@@ -571,7 +1162,7 @@ static struct amba_pl011_data uart0_plat = {
};
static struct amba_pl011_data uart1_plat = {
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
.dma_filter = stedma40_filter,
.dma_rx_param = &uart1_dma_cfg_rx,
.dma_tx_param = &uart1_dma_cfg_tx,
@@ -579,7 +1170,7 @@ static struct amba_pl011_data uart1_plat = {
};
static struct amba_pl011_data uart2_plat = {
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
.dma_filter = stedma40_filter,
.dma_rx_param = &uart2_dma_cfg_rx,
.dma_tx_param = &uart2_dma_cfg_tx,
@@ -593,23 +1184,45 @@ static void __init mop500_uart_init(void)
db8500_add_uart2(&uart2_plat);
}
+static void __init u8500_cryp1_hash1_init(void)
+{
+#ifdef CONFIG_CRYPTO_DEV_UX500
+ db8500_add_cryp1(&u8500_cryp1_platform_data);
+ db8500_add_hash1(&u8500_hash1_platform_data);
+#endif
+}
+
static struct platform_device *snowball_platform_devs[] __initdata = {
+ &ux500_hwmem_device,
&snowball_led_dev,
&snowball_key_dev,
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+ &snowball_gpio_en_3v3_regulator_device,
+ &snowball_gpio_wlan_vbat_regulator_device,
+#endif
&snowball_sbnet_dev,
- &ab8500_device,
+#ifdef CONFIG_FB_MCDE
+ &u8500_mcde_device,
+#endif
+ &u8500_b2r2_device,
};
static void __init mop500_init_machine(void)
{
- int i2c0_devs;
-
mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR;
+ mop500_gpio_keys[1].gpio = GPIO_HAL_SENSOR;
u8500_init_devices();
mop500_pins_init();
+ u8500_cryp1_hash1_init();
+
+#ifdef CONFIG_HSI
+ hsi_register_board_info(u8500_hsi_devices,
+ ARRAY_SIZE(u8500_hsi_devices));
+#endif
+
platform_add_devices(mop500_platform_devs,
ARRAY_SIZE(mop500_platform_devs));
@@ -617,12 +1230,23 @@ static void __init mop500_init_machine(void)
mop500_sdi_init();
mop500_spi_init();
mop500_uart_init();
+#ifdef CONFIG_STM_MSP_I2S
+ mop500_msp_init();
+#endif
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+ mop500_wlan_init();
+#endif
+
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+ db8500_add_ske_keypad(&mop500_ske_keypad_data);
+#endif
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
+ platform_device_register(&ab8500_device);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
+ i2c_register_board_info(0, mop500_i2c0_devices,
+ ARRAY_SIZE(mop500_i2c0_devices));
i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
+ ARRAY_SIZE(mop500_i2c2_devices));
/* This board has full regulator constraints */
regulator_has_full_constraints();
@@ -630,12 +1254,17 @@ static void __init mop500_init_machine(void)
static void __init snowball_init_machine(void)
{
- int i2c0_devs;
-
u8500_init_devices();
snowball_pins_init();
+ u8500_cryp1_hash1_init();
+
+#ifdef CONFIG_HSI
+ hsi_register_board_info(u8500_hsi_devices,
+ ARRAY_SIZE(u8500_hsi_devices));
+#endif
+
platform_add_devices(snowball_platform_devs,
ARRAY_SIZE(snowball_platform_devs));
@@ -643,11 +1272,17 @@ static void __init snowball_init_machine(void)
snowball_sdi_init();
mop500_spi_init();
mop500_uart_init();
+#ifdef CONFIG_STM_MSP_I2S
+ mop500_msp_init();
+#endif
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+ mop500_wlan_init();
+#endif
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
- i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
+ platform_device_register(&ab8500_device);
+
+ i2c_register_board_info(0, snowball_i2c0_devices,
+ ARRAY_SIZE(snowball_i2c0_devices));
/* This board has full regulator constraints */
regulator_has_full_constraints();
@@ -655,19 +1290,33 @@ static void __init snowball_init_machine(void)
static void __init hrefv60_init_machine(void)
{
- int i2c0_devs;
-
/*
* The HREFv60 board removed a GPIO expander and routed
* all these GPIO pins to the internal GPIO controller
* instead.
*/
mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO;
+ mop500_gpio_keys[1].gpio = HREFV60_HAL_SW_GPIO;
+
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ /*
+ * On boards hrefpv60 and later, the accessory insertion/removal,
+ * button press/release are inverted.
+ */
+ ab8500_accdet_pdata.is_detection_inverted = true;
+#endif
u8500_init_devices();
hrefv60_pins_init();
+ u8500_cryp1_hash1_init();
+
+#ifdef CONFIG_HSI
+ hsi_register_board_info(u8500_hsi_devices,
+ ARRAY_SIZE(u8500_hsi_devices));
+#endif
+
platform_add_devices(mop500_platform_devs,
ARRAY_SIZE(mop500_platform_devs));
@@ -675,14 +1324,23 @@ static void __init hrefv60_init_machine(void)
mop500_sdi_init();
mop500_spi_init();
mop500_uart_init();
+#ifdef CONFIG_STM_MSP_I2S
+ mop500_msp_init();
+#endif
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+ mop500_wlan_init();
+#endif
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+ db8500_add_ske_keypad(&mop500_ske_keypad_data);
+#endif
- i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
+ platform_device_register(&ab8500_device);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
+ i2c_register_board_info(0, mop500_i2c0_devices,
+ ARRAY_SIZE(mop500_i2c0_devices));
i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
+ ARRAY_SIZE(mop500_i2c2_devices));
/* This board has full regulator constraints */
regulator_has_full_constraints();
@@ -706,7 +1364,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
.init_machine = hrefv60_init_machine,
MACHINE_END
-MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
+MACHINE_START(SNOWBALL, "ST-Ericsson Snowball platform")
.atag_offset = 0x100,
.map_io = u8500_map_io,
.init_irq = ux500_init_irq,
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index de18a2a23e6..276be8a16ef 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -7,10 +7,19 @@
#ifndef __BOARD_MOP500_H
#define __BOARD_MOP500_H
-/* snowball GPIO for MMC card */
-#define SNOWBALL_SDMMC_EN_GPIO 217
-#define SNOWBALL_SDMMC_1V8_3V_GPIO 228
-#define SNOWBALL_SDMMC_CD_GPIO 218
+/* This defines NOMADIK_NR_GPIO */
+#include <linux/mfd/ab8500/gpio.h>
+#include <mach/gpio.h>
+
+/* Snowball GPIO for MMC card */
+#define SNOWBALL_SDMMC_EN_GPIO 217
+#define SNOWBALL_SDMMC_1V8_3V_GPIO 228
+#define SNOWBALL_SDMMC_CD_GPIO 218
+
+/* Snowball specific GPIO assignments, this board has no GPIO expander */
+#define SNOWBALL_ACCEL_INT1_GPIO 163
+#define SNOWBALL_ACCEL_INT2_GPIO 164
+#define SNOWBALL_MAGNET_DRDY_GPIO 165
/* HREFv60-specific GPIO assignments, this board has no GPIO expander */
#define HREFV60_TOUCH_RST_GPIO 143
@@ -24,18 +33,43 @@
#define HREFV60_MAGNET_DRDY_GPIO 32
#define HREFV60_DISP1_RST_GPIO 65
#define HREFV60_DISP2_RST_GPIO 66
+#define HREFV60_MMIO_XENON_CHARGE 170
+#define HREFV60_XSHUTDOWN_SECONDARY_SENSOR 140
+#define HREFV60_CAMERA_FLASH_ENABLE 22
+#define XSHUTDOWN_PRIMARY_SENSOR 141
+#define XSHUTDOWN_SECONDARY_SENSOR 142
+#define CAMERA_FLASH_INT_PIN 7
+#define CYPRESS_TOUCH_INT_PIN 84
+#define CYPRESS_TOUCH_RST_GPIO 143
+#define CYPRESS_SLAVE_SELECT_GPIO 216
+
+/* MOP500 generic GPIOs */
+#define MOP500_HDMI_RST_GPIO 196
/* GPIOs on the TC35892 expander */
-#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
+#define GPIO_MAGNET_DRDY MOP500_EGPIO(1)
#define GPIO_SDMMC_CD MOP500_EGPIO(3)
+#define GPIO_CAMERA_FLASH_ENABLE MOP500_EGPIO(4)
+#define GPIO_MMIO_XENON_CHARGE MOP500_EGPIO(5)
#define GPIO_PROX_SENSOR MOP500_EGPIO(7)
+#define GPIO_HAL_SENSOR MOP500_EGPIO(8)
+#define GPIO_ACCEL_INT1 MOP500_EGPIO(10)
+#define GPIO_ACCEL_INT2 MOP500_EGPIO(11)
#define GPIO_BU21013_CS MOP500_EGPIO(13)
+#define MOP500_DISP2_RST_GPIO MOP500_EGPIO(14)
+#define MOP500_DISP1_RST_GPIO MOP500_EGPIO(15)
#define GPIO_SDMMC_EN MOP500_EGPIO(17)
#define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18)
-#define MOP500_EGPIO_END MOP500_EGPIO(24)
-/* GPIOs on the AB8500 mixed-signals circuit */
-#define MOP500_AB8500_GPIO(x) (MOP500_EGPIO_END + (x))
+/* Snowball AB8500 GPIO */
+#define SNOWBALL_VSMPS2_1V8_GPIO AB8500_PIN_GPIO1 /* SYSCLKREQ2/GPIO1 */
+#define SNOWBALL_PM_GPIO1_GPIO AB8500_PIN_GPIO2 /* SYSCLKREQ3/GPIO2 */
+#define SNOWBALL_WLAN_CLK_REQ_GPIO AB8500_PIN_GPIO3 /* SYSCLKREQ4/GPIO3 */
+#define SNOWBALL_PM_GPIO4_GPIO AB8500_PIN_GPIO4 /* SYSCLKREQ6/GPIO4 */
+#define SNOWBALL_EN_3V6_GPIO AB8500_PIN_GPIO16 /* PWMOUT3/GPIO16 */
+#define SNOWBALL_PME_ETH_GPIO AB8500_PIN_GPIO24 /* SYSCLKREQ7/GPIO24 */
+#define SNOWBALL_EN_3V3_ETH_GPIO AB8500_PIN_GPIO26 /* GPIO26 */
+
struct i2c_board_info;
@@ -44,11 +78,21 @@ extern void snowball_sdi_init(void);
extern void mop500_sdi_tc35892_init(void);
void __init mop500_u8500uib_init(void);
void __init mop500_stuib_init(void);
+void __init mop500_msp_init(void);
void __init mop500_pins_init(void);
void __init hrefv60_pins_init(void);
void __init snowball_pins_init(void);
+void mop500_cyttsp_init(void);
+void __init mop500_u8500uib_r3_init(void);
-void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+void mop500_uib_i2c_add(int busnum, struct i2c_board_info const *info,
unsigned n);
+int msp13_i2s_init(void);
+int msp13_i2s_exit(void);
+
+int uib_is_stuib(void);
+int uib_is_u8500uib(void);
+int uib_is_u8500uibr3(void);
+
#endif
diff --git a/arch/arm/mach-ux500/board-pins-sleep-force.c b/arch/arm/mach-ux500/board-pins-sleep-force.c
new file mode 100644
index 00000000000..bef6f4e6df4
--- /dev/null
+++ b/arch/arm/mach-ux500/board-pins-sleep-force.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+#include <linux/gpio/nomadik.h>
+#include <mach/hardware.h>
+
+#include "board-pins-sleep-force.h"
+#include "pins-db8500.h"
+#include "pins.h"
+
+static u32 u8500_gpio_banks[] = {U8500_GPIOBANK0_BASE,
+ U8500_GPIOBANK1_BASE,
+ U8500_GPIOBANK2_BASE,
+ U8500_GPIOBANK3_BASE,
+ U8500_GPIOBANK4_BASE,
+ U8500_GPIOBANK5_BASE,
+ U8500_GPIOBANK6_BASE,
+ U8500_GPIOBANK7_BASE,
+ U8500_GPIOBANK8_BASE};
+
+/*
+ * This function is called to force gpio power save
+ * settings during suspend.
+ */
+void sleep_pins_config_pm(pin_cfg_t *cfgs, int num)
+{
+ int i = 0;
+ int gpio = 0;
+ u32 w_imsc = 0;
+ u32 imsc = 0;
+ u32 offset;
+ u32 bitmask = 1;
+ u32 dirs_register = 0;
+ u32 dirc_register = 0;
+ u32 dats_register = 0;
+ u32 datc_register = 0;
+ u32 pdis_register_disable = 0;
+ u32 pdis_register_enabled = 0;
+ u32 slpm_register_disabled = 0;
+ u32 slpm_register_enabled = 0;
+ u32 bankaddr = 0;
+
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* Get the base address of the bank the pin is mapped to */
+ bankaddr = IO_ADDRESS(u8500_gpio_banks[(gpio >> GPIO_BLOCK_SHIFT)]);
+
+ w_imsc = readl(bankaddr + NMK_GPIO_RWIMSC) |
+ readl(bankaddr + NMK_GPIO_FWIMSC);
+
+ imsc = readl(bankaddr + NMK_GPIO_RIMSC) |
+ readl(bankaddr + NMK_GPIO_FIMSC);
+
+ for (i = 0; i < num; i++) {
+ /* Get the pin number */
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* Get the offset into the register */
+ offset = gpio % NMK_GPIO_PER_CHIP;
+ /* Set the bit to toggle */
+ bitmask = 1 << offset;
+
+ /* Next we check for direction (INPUT/OUTPUT) */
+ switch (PIN_SLPM_DIR(cfgs[i])) {
+ case GPIO_IS_INPUT:
+ /* GPIO is set to input */
+ dirc_register |= bitmask;
+
+ /*
+ * Next check for pull (PULLUP/PULLDOWN)
+ * and configure accordingly.
+ */
+ switch (PIN_SLPM_PULL(cfgs[i])) {
+ case GPIO_PULL_UPDOWN_DISABLED:
+ pdis_register_disable |= bitmask;
+ break;
+
+ case GPIO_IS_PULLUP:
+ dats_register |= bitmask;
+ pdis_register_enabled |= bitmask;
+ break;
+
+ case GPIO_IS_PULLDOWN:
+ datc_register |= bitmask;
+ pdis_register_enabled |= bitmask;
+ break;
+
+ case GPIO_PULL_NO_CHANGE:
+ break;
+
+ default:
+ BUG();
+ break;
+
+ }
+ break;
+
+ case GPIO_IS_OUTPUT:
+ /* GPIO is set to output */
+ dirs_register |= bitmask;
+
+ /*
+ * Since its output there should not
+ * be a need to disable PULL UP/DOWN
+ * but better safe than sorry.
+ */
+ pdis_register_disable |= bitmask;
+ /* Next we check for setting GPIO HIGH/LOW */
+ switch (PIN_SLPM_VAL(cfgs[i])) {
+ case GPIO_IS_OUTPUT_LOW:
+ /* GPIO is set to LOW */
+ datc_register |= bitmask;
+ break;
+
+ case GPIO_IS_OUTPUT_HIGH:
+ /* GPIO is set to high */
+ dats_register |= bitmask;
+ break;
+
+ case GPIO_IS_NO_CHANGE:
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ break;
+ case GPIO_IS_NOT_CHANGED:
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ /* Next check for Sleep Power Management (SLPM) */
+ switch (PIN_SLPM(cfgs[i])) {
+ case GPIO_WAKEUP_IS_ENABLED:
+ slpm_register_enabled |= bitmask;
+ break;
+
+ case GPIO_WAKEUP_IS_DISBLED:
+ slpm_register_disabled |= bitmask;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ /* Next check the pull-up/down disable setting (PDIS) */
+ switch (PIN_SLPM_PDIS(cfgs[i])) {
+ case GPIO_PDIS_NO_CHANGE:
+ break;
+
+ case GPIO_PDIS_DISABLED:
+ pdis_register_disable |= bitmask;
+ break;
+
+ case GPIO_PDIS_ENABLED:
+ pdis_register_enabled |= bitmask;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ }
+
+ /* Write the GPIO direction register settings */
+ writel(dirs_register & ~w_imsc, bankaddr + NMK_GPIO_DIRS);
+ writel(dirc_register, bankaddr + NMK_GPIO_DIRC);
+
+ writel(datc_register & ~w_imsc, bankaddr + NMK_GPIO_DATC);
+ writel(dats_register & ~w_imsc, bankaddr + NMK_GPIO_DATS);
+
+ /* Write the PDIS enable/disable */
+ writel(readl(bankaddr + NMK_GPIO_PDIS)
+ | (pdis_register_disable & ~w_imsc & ~imsc), bankaddr + NMK_GPIO_PDIS);
+ writel(readl(bankaddr + NMK_GPIO_PDIS)
+ & (~pdis_register_enabled & ~w_imsc & ~imsc), bankaddr + NMK_GPIO_PDIS);
+
+ /* Write the SLPM enable/disable */
+ writel(readl(bankaddr + NMK_GPIO_SLPC) | slpm_register_disabled,
+ bankaddr + NMK_GPIO_SLPC);
+ writel(readl(bankaddr + NMK_GPIO_SLPC) & ~slpm_register_enabled,
+ bankaddr + NMK_GPIO_SLPC);
+}
+
+void sleep_pins_config_pm_mux(pin_cfg_t *cfgs, int num)
+{
+ int i = 0;
+ int gpio = 0;
+ u32 offset;
+ u32 bitmask = 1;
+ u32 gpio_afsla_register_set = 0;
+ u32 gpio_afslb_register_set = 0;
+ u32 gpio_afsla_register_clear = 0;
+ u32 gpio_afslb_register_clear = 0;
+ u32 bankaddr = 0;
+
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* Get the base address of the bank the pin is mapped to */
+ bankaddr = IO_ADDRESS(u8500_gpio_banks[(gpio >> GPIO_BLOCK_SHIFT)]);
+
+ for (i = 0; i < num; i++) {
+ /* Get the pin number */
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* get the offset into the register */
+ offset = gpio % NMK_GPIO_PER_CHIP;
+ /* Set the bit to toggle */
+ bitmask = 1 << offset;
+
+ /* First check for ALT pin configuration */
+ switch (PIN_ALT(cfgs[i])) {
+ case NMK_GPIO_ALT_GPIO:
+ /* Set bit to configured as GPIO */
+ gpio_afsla_register_clear |= bitmask;
+ gpio_afslb_register_clear |= bitmask;
+ break;
+
+ case NMK_GPIO_ALT_A:
+ /* ALT A setting so set corresponding bit */
+ gpio_afsla_register_set |= bitmask;
+ break;
+
+ case NMK_GPIO_ALT_B:
+ /* ALT B setting so set corresponding bit */
+ gpio_afslb_register_set |= bitmask;
+ break;
+
+ case NMK_GPIO_ALT_C:
+ /* ALT C setting so set corresponding bits */
+ gpio_afsla_register_set |= bitmask;
+ gpio_afslb_register_set |= bitmask;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+ }
+ /* Clear the bits for pins configured as GPIO */
+ writel(readl(bankaddr + NMK_GPIO_AFSLA)
+ & ~gpio_afsla_register_clear, bankaddr + NMK_GPIO_AFSLA);
+ writel(readl(bankaddr + NMK_GPIO_AFSLB)
+ & ~gpio_afslb_register_clear, bankaddr + NMK_GPIO_AFSLB);
+
+ /* Set the bits for pins that use an ALT_x function */
+ writel(readl(bankaddr + NMK_GPIO_AFSLA)
+ | gpio_afsla_register_set, bankaddr + NMK_GPIO_AFSLA);
+ writel(readl(bankaddr + NMK_GPIO_AFSLB)
+ | gpio_afslb_register_set, bankaddr + NMK_GPIO_AFSLB);
+}
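A board would typically hand these helpers a pin_cfg_t table from its suspend path; a minimal, hypothetical usage sketch, reusing pin macros that already appear elsewhere in this series:

/* Hypothetical example table, not part of the patch. */
static pin_cfg_t example_sleep_pins[] = {
	GPIO75_U2_RXD | PIN_SLPM_INPUT_PULLUP,
	GPIO76_U2_TXD | PIN_SLPM_OUTPUT_LOW,
};

static void example_force_sleep_pins(void)
{
	/* Force the alternate-function muxing first, then the sleep-mode
	 * direction/pull/wakeup settings for the same pins. */
	sleep_pins_config_pm_mux(ARRAY_AND_SIZE(example_sleep_pins));
	sleep_pins_config_pm(ARRAY_AND_SIZE(example_sleep_pins));
}

Note that both helpers derive the bank base address from the first entry only, so a table like this should stay within a single GPIO bank.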
diff --git a/arch/arm/mach-ux500/board-pins-sleep-force.h b/arch/arm/mach-ux500/board-pins-sleep-force.h
new file mode 100644
index 00000000000..0949c9bfcda
--- /dev/null
+++ b/arch/arm/mach-ux500/board-pins-sleep-force.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_PINS_SLEEP_FORCE_H
+#define __BOARD_PINS_SLEEP_FORCE_H
+
+#include <plat/pincfg.h>
+
+#define NMK_GPIO_PER_CHIP 32
+#define GPIO_BLOCK_SHIFT 5
+
+#define GPIO_IS_NOT_CHANGED 0
+#define GPIO_IS_INPUT 1
+#define GPIO_IS_OUTPUT 2
+
+#define GPIO_WAKEUP_IS_ENABLED 0
+#define GPIO_WAKEUP_IS_DISBLED 1
+
+#define GPIO_IS_NO_CHANGE 0
+#define GPIO_IS_OUTPUT_LOW 1
+#define GPIO_IS_OUTPUT_HIGH 2
+
+#define GPIO_PULL_NO_CHANGE 0
+#define GPIO_PULL_UPDOWN_DISABLED 1
+#define GPIO_IS_PULLUP 2
+#define GPIO_IS_PULLDOWN 3
+
+#define GPIO_PDIS_NO_CHANGE 0
+#define GPIO_PDIS_DISABLED 1
+#define GPIO_PDIS_ENABLED 2
+
+void sleep_pins_config_pm_mux(pin_cfg_t *cfgs, int num);
+void sleep_pins_config_pm(pin_cfg_t *cfgs, int num);
+
+#endif
diff --git a/arch/arm/mach-ux500/board-u5500-bm.c b/arch/arm/mach-ux500/board-u5500-bm.c
new file mode 100644
index 00000000000..7cfc6386b35
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-bm.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U5500 board specific charger and battery initialization parameters.
+ *
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/power_supply.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include "board-u5500-bm.h"
+
+#ifdef CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL
+/*
+ * These are the defined batteries that use an NTC and an ID resistor placed
+ * inside the battery pack.
+ * Note that the abx500_res_to_temp table must be strictly sorted by
+ * falling resistance values to work.
+ */
+static struct abx500_res_to_temp temp_tbl_type1[] = {
+ {-20, 67400},
+ { 0, 49200},
+ { 5, 44200},
+ { 10, 39400},
+ { 15, 35000},
+ { 20, 31000},
+ { 25, 27400},
+ { 30, 24300},
+ { 35, 21700},
+ { 40, 19400},
+ { 45, 17500},
+ { 50, 15900},
+ { 55, 14600},
+ { 60, 13500},
+ { 65, 12500},
+ { 70, 11800},
+ {100, 9200},
+};
+
+static struct abx500_res_to_temp temp_tbl_type2[] = {
+ {-20, 180700},
+ { 0, 160000},
+ { 5, 152700},
+ { 10, 144900},
+ { 15, 136800},
+ { 20, 128700},
+ { 25, 121000},
+ { 30, 113800},
+ { 35, 107300},
+ { 40, 101500},
+ { 45, 96500},
+ { 50, 92200},
+ { 55, 88600},
+ { 60, 85600},
+ { 65, 83000},
+ { 70, 80900},
+ {100, 73900},
+};
+
+static struct abx500_res_to_temp temp_tbl_A[] = {
+ {-5, 53407},
+ { 0, 48594},
+ { 5, 43804},
+ {10, 39188},
+ {15, 34870},
+ {20, 30933},
+ {25, 27422},
+ {30, 24347},
+ {35, 21694},
+ {40, 19431},
+ {45, 17517},
+ {50, 15908},
+ {55, 14561},
+ {60, 13437},
+ {65, 12500},
+};
+
+static struct abx500_res_to_temp temp_tbl_B[] = {
+ {-5, 165418},
+ { 0, 159024},
+ { 5, 151921},
+ {10, 144300},
+ {15, 136424},
+ {20, 128565},
+ {25, 120978},
+ {30, 113875},
+ {35, 107397},
+ {40, 101629},
+ {45, 96592},
+ {50, 92253},
+ {55, 88569},
+ {60, 85461},
+ {65, 82869},
+};
+
+static struct abx500_v_to_cap cap_tbl_type1[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+
+static struct abx500_v_to_cap cap_tbl_A[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+static struct abx500_v_to_cap cap_tbl_B[] = {
+ {4161, 100},
+ {4124, 98},
+ {4044, 90},
+ {4003, 85},
+ {3966, 80},
+ {3933, 75},
+ {3888, 67},
+ {3849, 60},
+ {3813, 55},
+ {3787, 47},
+ {3772, 30},
+ {3751, 25},
+ {3718, 20},
+ {3681, 16},
+ {3660, 14},
+ {3589, 10},
+ {3546, 7},
+ {3495, 4},
+ {3404, 2},
+ {3250, 0},
+};
+#endif
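+
+/* Battery voltage (mV) to remaining capacity (%) lookup table. */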
+static struct abx500_v_to_cap cap_tbl[] = {
+ {4186, 100},
+ {4163, 99},
+ {4114, 95},
+ {4068, 90},
+ {3990, 80},
+ {3926, 70},
+ {3898, 65},
+ {3866, 60},
+ {3833, 55},
+ {3812, 50},
+ {3787, 40},
+ {3768, 30},
+ {3747, 25},
+ {3730, 20},
+ {3705, 15},
+ {3699, 14},
+ {3684, 12},
+ {3672, 9},
+ {3657, 7},
+ {3638, 6},
+ {3556, 4},
+ {3424, 2},
+ {3317, 1},
+ {3094, 0},
+};
+
+/*
+ * Note that the abx500_res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct abx500_res_to_temp temp_tbl[] = {
+ {-5, 214834},
+ { 0, 162943},
+ { 5, 124820},
+ {10, 96520},
+ {15, 75306},
+ {20, 59254},
+ {25, 47000},
+ {30, 37566},
+ {35, 30245},
+ {40, 24520},
+ {45, 20010},
+ {50, 16432},
+ {55, 13576},
+ {60, 11280},
+ {65, 9425},
+};
+
+static const struct abx500_battery_type bat_type[] = {
+ [BATTERY_UNKNOWN] = {
+ /* First element always represents the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_vol = 3990,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+
+#ifdef CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 70000,
+ .resis_low = 8200,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_type1),
+ .r_to_t_tbl = temp_tbl_type1,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_type1),
+ .v_to_cap_tbl = cap_tbl_type1,
+
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 165418,
+ .resis_low = 82869,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B),
+ .r_to_t_tbl = temp_tbl_B,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B),
+ .v_to_cap_tbl = cap_tbl_B,
+ },
+#else
+/*
+ * These are the batteries that do not have an internal NTC resistor to
+ * measure their temperature. The temperature in this case is measured with
+ * an NTC placed near the battery but on the PCB.
+ */
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 76000,
+ .resis_low = 53000,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 30000,
+ .resis_low = 10000,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 95000,
+ .resis_low = 76001,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+#endif
+};
+
+static char *ab5500_charger_supplied_to[] = {
+ "abx500_chargalg",
+ "ab5500_fg",
+ "ab5500_btemp",
+};
+
+static char *ab5500_btemp_supplied_to[] = {
+ "abx500_chargalg",
+ "ab5500_fg",
+};
+
+static char *ab5500_fg_supplied_to[] = {
+ "abx500_chargalg",
+};
+
+static char *abx500_chargalg_supplied_to[] = {
+ "ab5500_fg",
+};
+
+struct abx500_charger_platform_data ab5500_charger_plat_data = {
+ .supplied_to = ab5500_charger_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab5500_charger_supplied_to),
+};
+
+struct abx500_btemp_platform_data ab5500_btemp_plat_data = {
+ .supplied_to = ab5500_btemp_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab5500_btemp_supplied_to),
+};
+
+struct abx500_fg_platform_data ab5500_fg_plat_data = {
+ .supplied_to = ab5500_fg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab5500_fg_supplied_to),
+};
+
+struct abx500_chargalg_platform_data abx500_chargalg_plat_data = {
+ .supplied_to = abx500_chargalg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(abx500_chargalg_supplied_to),
+};
+
+static const struct abx500_bm_capacity_levels cap_levels = {
+ .critical = 2,
+ .low = 10,
+ .normal = 70,
+ .high = 95,
+ .full = 100,
+};
+
+static const struct abx500_fg_parameters fg = {
+ .recovery_sleep_timer = 10,
+ .recovery_total_time = 100,
+ .init_timer = 1,
+ .init_discard_time = 5,
+ .init_total_time = 40,
+ .high_curr_time = 60,
+ .accu_charging = 30,
+ .accu_high_curr = 30,
+ .high_curr_threshold = 50,
+ .lowbat_threshold = 3100,
+};
+
+static const struct abx500_maxim_parameters maxi_params = {
+ .ena_maxi = true,
+ .chg_curr = 910,
+ .wait_cycles = 10,
+ .charger_curr_step = 100,
+};
+
+static const struct abx500_bm_charger_parameters chg = {
+ .usb_volt_max = 5500,
+ .usb_curr_max = 1500,
+ .ac_volt_max = 7500,
+ .ac_curr_max = 1500,
+};
+
+struct abx500_bm_data ab5500_bm_data = {
+ .temp_under = 3,
+ .temp_low = 8,
+ /* TODO: Need to verify the temp values */
+ .temp_high = 155,
+ .temp_over = 160,
+ .main_safety_tmr_h = 4,
+ .usb_safety_tmr_h = 4,
+ .bkup_bat_v = 0x00,
+ .bkup_bat_i = 0x00,
+ .no_maintenance = true,
+#ifdef CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL
+ .adc_therm = ABx500_ADC_THERM_BATCTRL,
+#else
+ .adc_therm = ABx500_ADC_THERM_BATTEMP,
+#endif
+ .chg_unknown_bat = false,
+ .enable_overshoot = false,
+ .fg_res = 20,
+ .cap_levels = &cap_levels,
+ .bat_type = bat_type,
+ .n_btypes = ARRAY_SIZE(bat_type),
+ .batt_id = 0,
+ .interval_charging = 5,
+ .interval_not_charging = 120,
+ .temp_hysteresis = 3,
+ .maxi = &maxi_params,
+ .chg_params = &chg,
+ .fg_params = &fg,
+};
+
+/* ab5500 energy management platform data */
+struct abx500_bm_plat_data abx500_bm_pt_data = {
+ .battery = &ab5500_bm_data,
+ .charger = &ab5500_charger_plat_data,
+ .btemp = &ab5500_btemp_plat_data,
+ .fg = &ab5500_fg_plat_data,
+ .chargalg = &abx500_chargalg_plat_data,
+};
diff --git a/arch/arm/mach-ux500/board-u5500-bm.h b/arch/arm/mach-ux500/board-u5500-bm.h
new file mode 100644
index 00000000000..a6346905911
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-bm.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U5500 board specific charger and battery initialization parameters.
+ *
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#ifndef __BOARD_U5500_BM_H
+#define __BOARD_U5500_BM_H
+
+#include <linux/mfd/abx500/ab5500-bm.h>
+
+extern struct abx500_charger_platform_data ab5500_charger_plat_data;
+extern struct abx500_btemp_platform_data ab5500_btemp_plat_data;
+extern struct abx500_fg_platform_data ab5500_fg_plat_data;
+extern struct abx500_chargalg_platform_data abx500_chargalg_plat_data;
+extern struct abx500_bm_data ab5500_bm_data;
+extern struct abx500_bm_plat_data abx500_bm_pt_data;
+
+#endif
diff --git a/arch/arm/mach-ux500/board-u5500-cyttsp.c b/arch/arm/mach-ux500/board-u5500-cyttsp.c
new file mode 100755
index 00000000000..f0f8192fade
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-cyttsp.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Author: Avinash A <avinash.a@stericsson.com> for ST-Ericsson
+ * License terms:GNU General Public License (GPL) version 2
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/cyttsp.h>
+#include <linux/delay.h>
+#include <linux/amba/pl022.h>
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+
+#include "pins-db5500.h"
+#include "board-u5500.h"
+
+/* cyttsp_plat_init: configures the touch panel slave select line. */
+static int cyttsp_plat_init(int on)
+{
+ int ret;
+
+ ret = gpio_direction_output(CYPRESS_SLAVE_SELECT_GPIO, 1);
+ if (ret < 0) {
+ pr_err("slave select gpio direction failed\n");
+ gpio_free(CYPRESS_SLAVE_SELECT_GPIO);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cyttsp_wakeup(void)
+{
+ int ret;
+
+ ret = gpio_request(CYPRESS_TOUCH_INT_PIN, "Wakeup_pin");
+ if (ret < 0) {
+ pr_err("touch gpio failed\n");
+ return ret;
+ }
+ ret = gpio_direction_output(CYPRESS_TOUCH_INT_PIN, 1);
+ if (ret < 0) {
+ pr_err("touch gpio direction failed\n");
+ goto out;
+ }
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ /*
+ * To wake up the controller from the sleep
+ * state, the interrupt pin needs to be
+ * pulsed twice with a delay greater
+ * than 2 microseconds between the pulses.
+ */
+ udelay(3);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ ret = gpio_direction_input(CYPRESS_TOUCH_INT_PIN);
+ if (ret < 0) {
+ pr_err("touch gpio direction IN config failed\n");
+ goto out;
+ }
+out:
+ gpio_free(CYPRESS_TOUCH_INT_PIN);
+ return 0;
+}
+static struct cyttsp_platform_data cyttsp_spi_platdata = {
+ .maxx = 480,
+ .maxy = 854,
+ .flags = 0,
+ .gen = CY_GEN3,
+ .use_st = 0,
+ .use_mt = 1,
+ .use_trk_id = 0,
+ .use_hndshk = 0,
+ .use_sleep = 1,
+ .use_gestures = 0,
+ .use_load_file = 0,
+ .use_force_fw_update = 0,
+ .use_virtual_keys = 0,
+ /* activate up to 4 groups and set active distance */
+ .gest_set = CY_GEST_GRP_NONE | CY_ACT_DIST,
+ /* change scn_type to enable finger and/or stylus detection */
+ .scn_typ = 0xA5, /* autodetect finger+stylus; balanced mutual scan */
+ .act_intrvl = CY_ACT_INTRVL_DFLT, /* Active refresh interval; ms */
+ .tch_tmout = CY_TCH_TMOUT_DFLT, /* Active touch timeout; ms */
+ .lp_intrvl = CY_LP_INTRVL_DFLT, /* Low power refresh interval; ms */
+ .init = cyttsp_plat_init,
+ .mt_sync = input_mt_sync,
+ .wakeup = cyttsp_wakeup,
+ .name = CY_SPI_NAME,
+ .irq_gpio = CYPRESS_TOUCH_INT_PIN,
+ .rst_gpio = CYPRESS_TOUCH_RST_GPIO,
+};
+
+static void cyttsp_spi_cs_control(u32 command)
+{
+ if (command == SSP_CHIP_SELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 0);
+ else if (command == SSP_CHIP_DESELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 1);
+}
+
+static struct pl022_config_chip cyttsp_ssp_config_chip = {
+ .com_mode = INTERRUPT_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ /* we can act as master only */
+ .hierarchy = SSP_MASTER,
+ .slave_tx_disable = 0,
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_16_OR_MORE_EMPTY_LOC,
+ .ctrl_len = SSP_BITS_16,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ .cs_control = cyttsp_spi_cs_control,
+};
+
+static struct spi_board_info cypress_spi_devices[] = {
+ {
+ .modalias = CY_SPI_NAME,
+ .controller_data = &cyttsp_ssp_config_chip,
+ .platform_data = &cyttsp_spi_platdata,
+ .max_speed_hz = 1000000,
+ .bus_num = 1,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ }
+};
+
+void u5500_cyttsp_init(void)
+{
+ int ret = 0;
+
+ ret = gpio_request(CYPRESS_SLAVE_SELECT_GPIO, "slave_select_gpio");
+ if (ret < 0) {
+ pr_err("slave select gpio failed\n");
+ return;
+ }
+ if (cpu_is_u5500v2())
+ cyttsp_spi_platdata.invert = true;
+ spi_register_board_info(cypress_spi_devices,
+ ARRAY_SIZE(cypress_spi_devices));
+}
+
diff --git a/arch/arm/mach-ux500/board-u5500-mcde.c b/arch/arm/mach-ux500/board-u5500-mcde.c
new file mode 100644
index 00000000000..4af8ef048f7
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-mcde.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/dispdev.h>
+#include <video/av8100.h>
+#include <asm/mach-types.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-generic_dsi.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+#include <video/mcde_display-av8100.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+
+#define DSI_UNIT_INTERVAL_0 0xA
+#define DSI_UNIT_INTERVAL_2 0x5
+
+enum {
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY) || \
+ defined(CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY)
+ PRIMARY_DISPLAY_ID,
+#endif
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+ AV8100_DISPLAY_ID,
+#endif
+ MCDE_NR_OF_DISPLAYS
+};
+
+
+#ifdef CONFIG_FB_MCDE
+
+/*
+ * The initialization of the HDMI display driver must be delayed in order to
+ * ensure that inputclk is available (needed by the HDMI hardware).
+ */
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+static struct delayed_work work_dispreg_hdmi;
+#define DISPREG_HDMI_DELAY 6000
+#endif
+
+static int display_initialized_during_boot;
+
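+/*
+ * Parse "startup_graphics=<n>" from the kernel command line; a non-zero
+ * value indicates that the boot loader has already initialized the display,
+ * in which case the primary display is registered in standby mode below.
+ */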
+static int __init startup_graphics_setup(char *str)
+{
+
+ if (get_option(&str, &display_initialized_during_boot) != 1)
+ display_initialized_during_boot = 0;
+
+ switch (display_initialized_during_boot) {
+ case 1:
+ pr_info("Startup graphics support\n");
+ break;
+ case 0:
+ default:
+ pr_info("No startup graphics supported\n");
+ break;
+ }
+
+ return 1;
+}
+__setup("startup_graphics=", startup_graphics_setup);
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+static struct mcde_col_transform rgb_2_yCbCr_transform = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x10, 0x80, 0x80},
+};
+#endif
+
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY
+static struct mcde_port port0 = {
+ .type = MCDE_PORTTYPE_DSI,
+ .mode = MCDE_PORTMODE_CMD,
+ .pixel_format = MCDE_PORTPIXFMT_DSI_24BPP,
+ .ifc = DSI_VIDEO_MODE,
+ .link = 0,
+ .sync_src = MCDE_SYNCSRC_BTA,
+ .phy = {
+ .dsi = {
+ .virt_id = 0,
+ .num_data_lanes = 2,
+ .ui = DSI_UNIT_INTERVAL_0,
+ .clk_cont = false,
+ .data_lanes_swap = false,
+ },
+ },
+};
+
+struct mcde_display_generic_platform_data generic_display0_pdata = {
+ .reset_gpio = 226,
+ .reset_delay = 10,
+ .sleep_out_delay = 140,
+#ifdef CONFIG_REGULATOR
+ .regulator_id = "v-display",
+ .min_supply_voltage = 2500000, /* 2.5V */
+ .max_supply_voltage = 2700000 /* 2.7V */
+#endif
+};
+
+struct mcde_display_device generic_display0 = {
+ .name = "mcde_disp_generic",
+ .id = PRIMARY_DISPLAY_ID,
+ .port = &port0,
+ .chnl_id = MCDE_CHNL_A,
+ .fifo = MCDE_FIFO_A,
+#ifdef CONFIG_MCDE_DISPLAY_PRIMARY_16BPP
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+#else
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#endif
+ .native_x_res = 864,
+ .native_y_res = 480,
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ /* TODO: Remove rotation buffers once ESRAM driver is completed */
+ .rotbuf1 = U5500_ESRAM_BASE + 0x20000 * 2,
+ .rotbuf2 = U5500_ESRAM_BASE + 0x20000 * 2 + 0x10000,
+ .dev = {
+ .platform_data = &generic_display0_pdata,
+ },
+};
+#endif /* CONFIG_DISPLAY_GENERIC_DSI_PRIMARY */
+
+static struct mcde_port port1 = {
+ .link = 0,
+};
+
+struct mcde_display_sony_acx424akp_platform_data \
+ sony_acx424akp_display0_pdata = {
+ .reset_gpio = 226,
+};
+
+struct mcde_display_device sony_acx424akp_display0 = {
+ .name = "mcde_disp_sony_acx424akp",
+ .id = PRIMARY_DISPLAY_ID,
+ .port = &port1,
+ .chnl_id = MCDE_CHNL_A,
+ .fifo = MCDE_FIFO_A,
+#ifdef CONFIG_MCDE_DISPLAY_PRIMARY_16BPP
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+#else
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#endif
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ /* TODO: Remove rotation buffers once ESRAM driver is completed */
+ .rotbuf1 = U5500_ESRAM_BASE + 0x20000 * 2,
+ .rotbuf2 = U5500_ESRAM_BASE + 0x20000 * 2 + 0x10000,
+ .dev = {
+ .platform_data = &sony_acx424akp_display0_pdata,
+ },
+};
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+static struct mcde_port port2 = {
+ .type = MCDE_PORTTYPE_DSI,
+ .mode = MCDE_PORTMODE_CMD,
+ .pixel_format = MCDE_PORTPIXFMT_DSI_24BPP,
+ .ifc = DSI_VIDEO_MODE,
+ .link = 1,
+#ifdef CONFIG_AV8100_HWTRIG_INT
+ .sync_src = MCDE_SYNCSRC_TE0,
+#endif
+#ifdef CONFIG_AV8100_HWTRIG_I2SDAT3
+ .sync_src = MCDE_SYNCSRC_TE1,
+#endif
+#ifdef CONFIG_AV8100_HWTRIG_DSI_TE
+ .sync_src = MCDE_SYNCSRC_TE_POLLING,
+#endif
+#ifdef CONFIG_AV8100_HWTRIG_NONE
+ .sync_src = MCDE_SYNCSRC_OFF,
+#endif
+ .update_auto_trig = true,
+ .phy = {
+ .dsi = {
+ .virt_id = 0,
+ .num_data_lanes = 2,
+ .ui = DSI_UNIT_INTERVAL_2,
+ .clk_cont = false,
+ .data_lanes_swap = false,
+ },
+ },
+ .hdmi_sdtv_switch = HDMI_SWITCH,
+};
+
+struct mcde_display_hdmi_platform_data av8100_hdmi_pdata = {
+ .rgb_2_yCbCr_transform = &rgb_2_yCbCr_transform,
+};
+
+static struct mcde_display_device av8100_hdmi = {
+ .name = "av8100_hdmi",
+ .id = AV8100_DISPLAY_ID,
+ .port = &port2,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB888,
+ .native_x_res = 1280,
+ .native_y_res = 720,
+ .synchronized_update = false,
+ .dev = {
+ .platform_data = &av8100_hdmi_pdata,
+ },
+ .platform_enable = NULL,
+ .platform_disable = NULL,
+};
+
+static void delayed_work_dispreg_hdmi(struct work_struct *ptr)
+{
+ if (mcde_display_device_register(&av8100_hdmi))
+ pr_warning("Failed to register av8100_hdmi\n");
+}
+#endif /* CONFIG_DISPLAY_AV8100_TERTIARY */
+
+/*
+ * This function will create the framebuffer for the display that is
+ * registered.
+ */
+static int display_postregistered_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = dev;
+ u16 width, height;
+ u16 virtual_width, virtual_height;
+ u32 rotate = FB_ROTATE_UR;
+ u32 rotate_angle = 0;
+ struct fb_info *fbi;
+#ifdef CONFIG_DISPDEV
+ struct mcde_fb *mfb;
+#endif
+
+ struct mcde_display_sony_acx424akp_platform_data *pdata =
+ ddev->dev.platform_data;
+
+ if (event != MCDE_DSS_EVENT_DISPLAY_REGISTERED)
+ return 0;
+
+ if (ddev->id < PRIMARY_DISPLAY_ID || ddev->id >= MCDE_NR_OF_DISPLAYS)
+ return 0;
+
+ mcde_dss_get_native_resolution(ddev, &width, &height);
+
+ if (pdata->disp_panel == DISPLAY_SONY_ACX424AKP)
+ rotate_angle = 0;
+ else
+ rotate_angle = \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE;
+
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY) || \
+ defined(CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY)
+ if (ddev->id == PRIMARY_DISPLAY_ID) {
+ switch (rotate_angle) {
+ case 0:
+ rotate = FB_ROTATE_UR;
+ break;
+ case 90:
+ rotate = FB_ROTATE_CW;
+ swap(width, height);
+ break;
+ case 180:
+ rotate = FB_ROTATE_UD;
+ break;
+ case 270:
+ rotate = FB_ROTATE_CCW;
+ swap(width, height);
+ break;
+ }
+ }
+#endif
+
+ virtual_width = width;
+ virtual_height = height * 2;
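+ /* Double buffering by default; HDMI may use triple buffering below. */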
+
+#ifdef CONFIG_DISPLAY_AV8100_TRIPPLE_BUFFER
+ if (ddev->id == AV8100_DISPLAY_ID)
+ virtual_height = height * 3;
+#endif
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+ if (ddev->id == AV8100_DISPLAY_ID) {
+#ifdef CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ hdmi_fb_onoff(ddev, 1, 0, 0);
+#endif /* CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE */
+ goto out;
+ }
+#endif /* CONFIG_DISPLAY_AV8100_TERTIARY */
+
+ /* Create frame buffer */
+ fbi = mcde_fb_create(ddev,
+ width, height,
+ virtual_width, virtual_height,
+ ddev->default_pixel_format,
+ rotate);
+
+ if (IS_ERR(fbi)) {
+ dev_warn(&ddev->dev,
+ "Failed to create fb for display %s\n", ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Framebuffer created (%s)\n", ddev->name);
+ }
+
+#ifdef CONFIG_DISPDEV
+ mfb = to_mcde_fb(fbi);
+ /* Create a dispdev overlay for this display */
+ if (dispdev_create(ddev, true, mfb->ovlys[0]) < 0) {
+ dev_warn(&ddev->dev,
+ "Failed to create disp for display %s\n", ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Disp dev created for (%s)\n", ddev->name);
+ }
+#endif
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+out:
+#endif
+ return 0;
+
+display_postregistered_callback_err:
+ return -1;
+}
+
+static struct notifier_block display_nb = {
+ .notifier_call = display_postregistered_callback,
+};
+
+/*
+ * This function is used to refresh the display (LCD, HDMI, TV-out) with
+ * black when the framebuffer is registered.
+ * The main display will not be updated if startup graphics is displayed
+ * from u-boot.
+ */
+static int framebuffer_postregistered_callback(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ int ret = 0;
+ struct fb_event *event_data = data;
+ struct fb_info *info;
+ struct fb_var_screeninfo var;
+ struct fb_fix_screeninfo fix;
+ struct mcde_fb *mfb;
+
+ if (event != FB_EVENT_FB_REGISTERED)
+ return 0;
+
+ if (!event_data)
+ return 0;
+
+ info = event_data->info;
+ mfb = to_mcde_fb(info);
+ if (mfb->id == 0 && display_initialized_during_boot)
+ goto out;
+
+ var = info->var;
+ fix = info->fix;
+ var.yoffset = var.yoffset ? 0 : var.yres;
+ if (info->fbops->fb_pan_display)
+ ret = info->fbops->fb_pan_display(&var, info);
+out:
+ return ret;
+}
+
+static struct notifier_block framebuffer_nb = {
+ .notifier_call = framebuffer_postregistered_callback,
+};
+
+int __init init_u5500_display_devices(void)
+{
+ int ret = 0;
+
+ if (!cpu_is_u5500())
+ return ret;
+
+ ret = fb_register_client(&framebuffer_nb);
+ if (ret)
+ pr_warning("Failed to register framebuffer notifier\n");
+
+ ret = mcde_dss_register_notifier(&display_nb);
+ if (ret)
+ pr_warning("Failed to register dss notifier\n");
+
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY
+ if (cpu_is_u5500v1()) {
+ if (display_initialized_during_boot)
+ generic_display0.power_mode = MCDE_DISPLAY_PM_STANDBY;
+ ret = mcde_display_device_register(&generic_display0);
+ if (ret)
+ pr_warning("Failed to register generic display device 0\n");
+ }
+#endif
+
+#ifdef CONFIG_DISPLAY_SONY_ACX424AKP_DSI_PRIMARY
+ if (cpu_is_u5500v2()) {
+ if (display_initialized_during_boot)
+ sony_acx424akp_display0.power_mode = \
+ MCDE_DISPLAY_PM_STANDBY;
+ ret = mcde_display_device_register(&sony_acx424akp_display0);
+ if (ret)
+ pr_warning("Failed to register sony acx424akp display device 0\n");
+ }
+#endif
+
+#ifdef CONFIG_DISPLAY_AV8100_TERTIARY
+ INIT_DELAYED_WORK_DEFERRABLE(&work_dispreg_hdmi,
+ delayed_work_dispreg_hdmi);
+
+ schedule_delayed_work(&work_dispreg_hdmi,
+ msecs_to_jiffies(DISPREG_HDMI_DELAY));
+#endif
+
+ return ret;
+}
+
+module_init(init_u5500_display_devices);
+#endif
diff --git a/arch/arm/mach-ux500/board-u5500-pins.c b/arch/arm/mach-ux500/board-u5500-pins.c
new file mode 100644
index 00000000000..4635f240815
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-pins.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <plat/pincfg.h>
+
+#include "pins-db5500.h"
+#include "pins.h"
+
+static pin_cfg_t u5500_pins_default[] = {
+ /* MSP */
+ GPIO32_MSP0_TCK | PIN_INPUT_PULLDOWN,
+ GPIO33_MSP0_TFS | PIN_INPUT_PULLDOWN,
+ GPIO34_MSP0_TXD | PIN_INPUT_PULLDOWN,
+ GPIO35_MSP0_RXD | PIN_INPUT_PULLDOWN,
+ GPIO96_MSP1_TCK | PIN_INPUT_PULLDOWN,
+ GPIO97_MSP1_TFS | PIN_INPUT_PULLDOWN,
+ GPIO98_MSP1_TXD | PIN_INPUT_PULLDOWN,
+ GPIO99_MSP1_RXD | PIN_INPUT_PULLDOWN,
+ GPIO220_MSP2_TCK | PIN_OUTPUT_LOW,
+ GPIO221_MSP2_TFS | PIN_OUTPUT_LOW,
+ GPIO222_MSP2_TXD | PIN_OUTPUT_LOW,
+
+ /* DISPLAY_ENABLE */
+ GPIO226_GPIO | PIN_OUTPUT_LOW,
+
+ /* Backlight Enable */
+ GPIO224_GPIO | PIN_OUTPUT_HIGH,
+
+ /* UART0 */
+ GPIO28_U0_TXD | PIN_OUTPUT_HIGH,
+ GPIO29_U0_RXD | PIN_INPUT_PULLUP,
+
+ /* UART3 */
+ GPIO165_U3_RXD | PIN_INPUT_PULLUP,
+ GPIO166_U3_TXD | PIN_OUTPUT_HIGH,
+ GPIO167_U3_RTSn | PIN_OUTPUT_HIGH,
+ GPIO168_U3_CTSn | PIN_INPUT_PULLUP,
+
+ /* AB5500 */
+ GPIO78_IRQn | PIN_SLPM_NOCHANGE,
+ GPIO100_I2C0_SCL | PIN_INPUT_PULLUP | PIN_SLPM_NOCHANGE,
+ GPIO101_I2C0_SDA | PIN_SLPM_NOCHANGE,
+
+ /* TOUCH_IRQ */
+ GPIO179_GPIO | PIN_INPUT_PULLUP,
+
+ /* SD-CARD detect/levelshifter pins */
+ GPIO180_GPIO | PIN_INPUT_PULLUP,
+ GPIO227_GPIO,
+ GPIO185_GPIO,
+
+ /* Display & HDMI HW sync */
+ GPIO204_LCD_VSI1 | PIN_INPUT_PULLUP,
+
+ /* Camera & MMIO XshutDown */
+ GPIO1_GPIO | PIN_OUTPUT_LOW,
+ GPIO2_GPIO | PIN_OUTPUT_LOW,
+
+ /* USB chip select */
+ GPIO76_GPIO | PIN_OUTPUT_LOW,
+
+ GPIO202_ACCU0_RXD | PIN_INPUT_PULLUP | PIN_SLPM_NOCHANGE,
+ GPIO203_ACCU0_TXD | PIN_OUTPUT_HIGH | PIN_SLPM_NOCHANGE,
+
+ /* Board ID identification: B5500 or S5500 */
+ GPIO0_GPIO | PIN_INPUT_PULLUP,
+
+};
+
+static UX500_PINS(db5500_kp_pins,
+ /* Keypad */
+ GPIO128_KP_I0 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO130_KP_I1 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO132_KP_I2 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO134_KP_I3 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO137_KP_O4 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO139_KP_O5 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+);
+
+static UX500_PINS(db5500_pins_sdi0,
+ /* SDI0 (eMMC) */
+ GPIO5_MC0_DAT0 | PIN_INPUT_PULLUP,
+ GPIO6_MC0_DAT1 | PIN_INPUT_PULLUP,
+ GPIO7_MC0_DAT2 | PIN_INPUT_PULLUP,
+ GPIO8_MC0_DAT3 | PIN_INPUT_PULLUP,
+ GPIO9_MC0_DAT4 | PIN_INPUT_PULLUP,
+ GPIO10_MC0_DAT5 | PIN_INPUT_PULLUP,
+ GPIO11_MC0_DAT6 | PIN_INPUT_PULLUP,
+ GPIO12_MC0_DAT7 | PIN_INPUT_PULLUP,
+ GPIO13_MC0_CMD | PIN_INPUT_PULLUP,
+ GPIO14_MC0_CLK | PIN_OUTPUT_LOW,
+);
+
+static UX500_PINS(db5500_pins_sdi1,
+ /* SDI1 (SD-CARD) */
+ GPIO191_MC1_DAT0 | PIN_INPUT_PULLUP,
+ GPIO192_MC1_DAT1 | PIN_INPUT_PULLUP,
+ GPIO193_MC1_DAT2 | PIN_INPUT_PULLUP,
+ GPIO194_MC1_DAT3 | PIN_INPUT_PULLUP,
+ GPIO195_MC1_CLK | PIN_OUTPUT_LOW,
+ GPIO196_MC1_CMD | PIN_INPUT_PULLUP,
+ GPIO197_MC1_CMDDIR | PIN_OUTPUT_HIGH,
+ GPIO198_MC1_FBCLK | PIN_INPUT_NOPULL,
+ GPIO199_MC1_DAT0DIR | PIN_OUTPUT_HIGH,
+);
+
+static UX500_PINS(db5500_pins_sdi2,
+ /* SDI2 (eMMC) */
+ GPIO16_MC2_CMD | PIN_INPUT_PULLUP,
+ GPIO17_MC2_CLK | PIN_OUTPUT_LOW,
+ GPIO23_MC2_DAT0 | PIN_INPUT_PULLUP,
+ GPIO19_MC2_DAT1 | PIN_INPUT_PULLUP,
+ GPIO24_MC2_DAT2 | PIN_INPUT_PULLUP,
+ GPIO20_MC2_DAT3 | PIN_INPUT_PULLUP,
+ GPIO25_MC2_DAT4 | PIN_INPUT_PULLUP,
+ GPIO21_MC2_DAT5 | PIN_INPUT_PULLUP,
+ GPIO26_MC2_DAT6 | PIN_INPUT_PULLUP,
+ GPIO22_MC2_DAT7 | PIN_INPUT_PULLUP
+);
+
+static UX500_PINS(db5500_pins_sdi3,
+ /* SDI3 (SDIO) */
+ GPIO171_MC3_DAT0 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO172_MC3_DAT1 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO173_MC3_DAT2 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO174_MC3_DAT3 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO175_MC3_CMD | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO176_MC3_CLK | PIN_OUTPUT_LOW,
+);
+
+static UX500_PINS(u5500_pins_i2c1,
+ GPIO3_I2C1_SCL,
+ GPIO4_I2C1_SDA,
+);
+
+static UX500_PINS(u5500_pins_i2c2,
+ GPIO218_I2C2_SCL,
+ GPIO219_I2C2_SDA,
+);
+
+static UX500_PINS(u5500_pins_i2c3,
+ GPIO177_I2C3_SCL,
+ GPIO178_I2C3_SDA,
+);
+
+static UX500_PINS(u5500_pins_spi3,
+ GPIO186_GPIO | PIN_OUTPUT_HIGH,
+ GPIO188_SPI3_RXD | PIN_INPUT_PULLDOWN,
+ GPIO189_SPI3_TXD | PIN_OUTPUT_LOW,
+ GPIO190_SPI3_CLK | PIN_OUTPUT_LOW,
+);
+
+/* USB */
+static UX500_PINS(u5500_pins_usb,
+ GPIO74_USB_NXT,
+ GPIO72_USB_STP | PIN_OUTPUT_HIGH,
+ GPIO75_USB_XCLK,
+ GPIO73_USB_DIR,
+ GPIO71_USB_DAT7,
+ GPIO70_USB_DAT6,
+ GPIO69_USB_DAT5,
+ GPIO68_USB_DAT4,
+ GPIO67_USB_DAT3,
+ GPIO66_USB_DAT2,
+ GPIO65_USB_DAT1,
+ GPIO64_USB_DAT0,
+);
+
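+/*
+ * Per-device pin groups; drivers claim them at runtime by name via
+ * ux500_pins_get() (see for example db5500_kp_init() in board-u5500.c).
+ */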
+static struct ux500_pin_lookup u5500_pins[] = {
+ PIN_LOOKUP("nmk-i2c.1", &u5500_pins_i2c1),
+ PIN_LOOKUP("nmk-i2c.2", &u5500_pins_i2c2),
+ PIN_LOOKUP("nmk-i2c.3", &u5500_pins_i2c3),
+ PIN_LOOKUP("spi3", &u5500_pins_spi3),
+ PIN_LOOKUP("db5500_kp", &db5500_kp_pins),
+ PIN_LOOKUP("ab5500-usb.0", &u5500_pins_usb),
+ PIN_LOOKUP("sdi0", &db5500_pins_sdi0),
+ PIN_LOOKUP("sdi1", &db5500_pins_sdi1),
+ PIN_LOOKUP("sdi2", &db5500_pins_sdi2),
+ PIN_LOOKUP("sdi3", &db5500_pins_sdi3),
+};
+
+void __init u5500_pins_init(void)
+{
+ nmk_config_pins(u5500_pins_default, ARRAY_SIZE(u5500_pins_default));
+ ux500_pins_add(u5500_pins, ARRAY_SIZE(u5500_pins));
+}
diff --git a/arch/arm/mach-ux500/board-u5500-regulators.c b/arch/arm/mach-ux500/board-u5500-regulators.c
new file mode 100644
index 00000000000..66e8f3593d5
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-regulators.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab5500.h>
+
+#include "regulator-u5500.h"
+#include "board-u5500.h"
+
+/*
+ * AB5500
+ */
+
+static struct regulator_consumer_supply ab5500_ldo_g_consumers[] = {
+ REGULATOR_SUPPLY("vmmc", "sdi1"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_h_consumers[] = {
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ REGULATOR_SUPPLY("vdd", "1-004b"), /* Synaptics */
+ REGULATOR_SUPPLY("vin", "2-0036"), /* LM3530 */
+ REGULATOR_SUPPLY("vcpin", "spi1.0"),
+ REGULATOR_SUPPLY("v-ana", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_k_consumers[] = {
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.0"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.1"),
+ REGULATOR_SUPPLY("v-mmio-camera", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_l_consumers[] = {
+ REGULATOR_SUPPLY("vmmc", "sdi0"),
+ REGULATOR_SUPPLY("vmmc", "sdi2"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_vdigmic_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab5500_ldo_sim_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.5"),
+};
+
+static struct regulator_consumer_supply ab5500_bias2_consumers[] = {
+ REGULATOR_SUPPLY("v-amic", NULL),
+};
+
+static struct regulator_init_data
+ab5500_regulator_init_data[AB5500_NUM_REGULATORS] = {
+ /* SD Card */
+ [AB5500_LDO_G] = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2910000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .consumer_supplies = ab5500_ldo_g_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_g_consumers),
+ },
+ /* Display */
+ [AB5500_LDO_H] = {
+ .constraints = {
+ .min_uV = 2790000,
+ .max_uV = 2790000,
+ .apply_uV = 1,
+ .boot_on = 1, /* display on during boot */
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_h_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_h_consumers),
+ },
+ /* Camera */
+ [AB5500_LDO_K] = {
+ .constraints = {
+ .min_uV = 2790000,
+ .max_uV = 2790000,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_k_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_k_consumers),
+ },
+ /* External eMMC */
+ [AB5500_LDO_L] = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2910000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .consumer_supplies = ab5500_ldo_l_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_l_consumers),
+ },
+ [AB5500_LDO_VDIGMIC] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_vdigmic_consumers,
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab5500_ldo_vdigmic_consumers),
+ },
+ [AB5500_LDO_SIM] = {
+ .constraints = {
+ .boot_on = 1,
+ .always_on = 1,
+ .min_uV = 2900000,
+ .max_uV = 2900000,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_sim_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_sim_consumers),
+ },
+ [AB5500_BIAS2] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_bias2_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_bias2_consumers),
+ },
+};
+
+struct ab5500_regulator_platform_data u5500_ab5500_regulator_data = {
+ .regulator = ab5500_regulator_init_data,
+ .num_regulator = ARRAY_SIZE(ab5500_regulator_init_data),
+};
+
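+/*
+ * Register six virtual regulator consumer devices used for debugging;
+ * the AB5500 SIM LDO above lists "reg-virt-consumer.5" as a consumer.
+ */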
+static void __init u5500_regulators_init_debug(void)
+{
+ const char data[] = "debug";
+ int i;
+
+ for (i = 0; i < 6; i++)
+ platform_device_register_data(NULL, "reg-virt-consumer", i,
+ data, sizeof(data));
+}
+
+static struct regulator_consumer_supply u5500_vio_consumers[] = {
+ REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
+};
+
+static struct regulator_init_data u5500_vio_init_data = {
+ .constraints.always_on = 1,
+ .consumer_supplies = u5500_vio_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(u5500_vio_consumers),
+};
+
+static struct fixed_voltage_config u5500_vio_pdata __initdata = {
+ .supply_name = "vio_1v8",
+ .microvolts = 1800000,
+ .init_data = &u5500_vio_init_data,
+ .gpio = -EINVAL,
+};
+
+void __init u5500_regulators_init(void)
+{
+ u5500_regulators_init_debug();
+
+ platform_device_register_data(NULL, "reg-fixed-voltage", -1,
+ &u5500_vio_pdata,
+ sizeof(u5500_vio_pdata));
+
+ regulator_has_full_constraints();
+}
diff --git a/arch/arm/mach-ux500/board-u5500-sdi.c b/arch/arm/mach-ux500/board-u5500-sdi.c
index 63c3f8058ff..025f8bc8fa4 100644
--- a/arch/arm/mach-ux500/board-u5500-sdi.c
+++ b/arch/arm/mach-ux500/board-u5500-sdi.c
@@ -5,34 +5,30 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/amba/bus.h>
#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
-#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
-#include <mach/db5500-regs.h>
+#include <asm/mach-types.h>
#include <plat/ste_dma40.h>
+#include <mach/devices.h>
+#include <mach/hardware.h>
+#include <mach/ste-dma40-db5500.h>
-#include "pins-db5500.h"
#include "devices-db5500.h"
-#include "ste-dma40-db5500.h"
-
-static pin_cfg_t u5500_sdi_pins[] = {
- /* SDI0 (POP eMMC) */
- GPIO5_MC0_DAT0 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO6_MC0_DAT1 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO7_MC0_DAT2 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO8_MC0_DAT3 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO9_MC0_DAT4 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO10_MC0_DAT5 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO11_MC0_DAT6 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO12_MC0_DAT7 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO13_MC0_CMD | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO14_MC0_CLK | PIN_DIR_OUTPUT | PIN_VAL_LOW,
-};
+#include "board-u5500.h"
+
+#undef CONFIG_STE_DMA40 /* Temporarily disable DMA */
+/*
+ * SDI 0 (eMMC)
+ */
#ifdef CONFIG_STE_DMA40
-struct stedma40_chan_cfg u5500_sdi0_dma_cfg_rx = {
+static struct stedma40_chan_cfg sdi0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.src_dev_type = DB5500_DMA_DEV24_SDMMC0_RX,
@@ -41,7 +37,7 @@ struct stedma40_chan_cfg u5500_sdi0_dma_cfg_rx = {
.dst_info.data_width = STEDMA40_WORD_WIDTH,
};
-static struct stedma40_chan_cfg u5500_sdi0_dma_cfg_tx = {
+static struct stedma40_chan_cfg sdi0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.src_dev_type = STEDMA40_DEV_SRC_MEMORY,
@@ -61,14 +57,205 @@ static struct mmci_platform_data u5500_sdi0_data = {
.gpio_wp = -1,
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
- .dma_rx_param = &u5500_sdi0_dma_cfg_rx,
- .dma_tx_param = &u5500_sdi0_dma_cfg_tx,
+ .dma_rx_param = &sdi0_dma_cfg_rx,
+ .dma_tx_param = &sdi0_dma_cfg_tx,
+#endif
+};
+
+/*
+ * SDI 1 (MicroSD slot)
+ */
+
+static int u5500_sdi1_ios_handler(struct device *dev, struct mmc_ios *ios)
+{
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ /*
+ * The level-shifter voltage should depend on vdd when deciding
+ * between 1.8V and 2.9V. Once the decision has been made the
+ * level shifter must be disabled and re-enabled with a changed
+ * select signal in order to switch the voltage. Since there is
+ * no framework support yet for indicating 1.8V in vdd, use the
+ * default 2.9V.
+ */
+ gpio_set_value_cansleep(GPIO_MMC_CARD_CTRL, 1);
+ udelay(100);
+ break;
+ case MMC_POWER_OFF:
+ gpio_set_value_cansleep(GPIO_MMC_CARD_CTRL, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static struct stedma40_chan_cfg sdi1_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV25_SDMMC1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi1_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV25_SDMMC1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct mmci_platform_data u5500_sdi1_data = {
+ .ios_handler = u5500_sdi1_ios_handler,
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA |
+ MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_MMC_HIGHSPEED,
+ .gpio_cd = GPIO_SDMMC_CD,
+ .gpio_wp = -1,
+ .cd_invert = true,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi1_dma_cfg_rx,
+ .dma_tx_param = &sdi1_dma_cfg_tx,
+#endif
+};
+
+/*
+ * SDI2 (EMMC2)
+ */
+
+static struct stedma40_chan_cfg sdi2_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV26_SDMMC2_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi2_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV26_SDMMC2_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct mmci_platform_data u5500_sdi2_data = {
+ .ocr_mask = MMC_VDD_165_195,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA |
+ MMC_CAP_8_BIT_DATA |
+ MMC_CAP_MMC_HIGHSPEED,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi2_dma_cfg_rx,
+ .dma_tx_param = &sdi2_dma_cfg_tx,
#endif
};
+/*
+ * SDI 3 (SDIO WLAN)
+ */
+#ifdef SDIO_DMA_ON
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg sdi3_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV27_SDMMC3_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi3_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV27_SDMMC3_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+#endif
+
+static struct mmci_platform_data u5500_sdi3_data = {
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+#ifdef SDIO_DMA_ON
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi3_dma_cfg_rx,
+ .dma_tx_param = &sdi3_dma_cfg_tx,
+#endif
+#endif
+};
+
+static void sdi1_configure(void)
+{
+ int pin[2];
+ int ret;
+
+ /* Level-shifter GPIOs */
+ pin[0] = GPIO_MMC_CARD_CTRL;
+ pin[1] = GPIO_MMC_CARD_VSEL;
+
+ ret = gpio_request(pin[0], "MMC_CARD_CTRL");
+ if (!ret)
+ ret = gpio_request(pin[1], "MMC_CARD_VSEL");
+
+ if (ret) {
+ pr_warning("unable to configure sdi1 gpios for level shifter.\n");
+ return;
+ }
+ /* Select the default 2.9V and enable the level shifter */
+ gpio_direction_output(pin[0], 1);
+ gpio_direction_output(pin[1], 0);
+}
+
+#define SDI_PID_V1 0x00480180
+#define SDI_PID_V2 0x10480180
+#define BACKUPRAM_ROM_DEBUG_ADDR 0xFFC
+#define MMC_BLOCK_ID 0x20
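+/*
+ * periphid (SDI_PID_V1/V2) is the SDI peripheral ID passed to the
+ * db5500_add_sdi*() helpers below; MMC_BLOCK_ID is the flag read back from
+ * backup RAM that indicates which eMMC block the ROM code booted from.
+ */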
void __init u5500_sdi_init(void)
{
- nmk_config_pins(u5500_sdi_pins, ARRAY_SIZE(u5500_sdi_pins));
+ u32 periphid = 0;
+ int mmc_blk = 0;
+
+ if (cpu_is_u5500v1())
+ periphid = SDI_PID_V1;
+ else
+ periphid = SDI_PID_V2;
+
+ if (cpu_is_u5500v2())
+ /*
+ * Fix me in 5500 v2.1:
+ * detect the boot device dynamically by reading the
+ * ROM debug register from backup RAM and register the
+ * corresponding eMMC.
+ * This is needed because the ROM code for u5500 v2
+ * configures the MMC0 clock incorrectly.
+ */
+ mmc_blk = readl(__io_address(U5500_BACKUPRAM1_BASE) +
+ BACKUPRAM_ROM_DEBUG_ADDR);
+
+ if (mmc_blk & MMC_BLOCK_ID)
+ db5500_add_sdi2(&u5500_sdi2_data, periphid);
+ else
+ db5500_add_sdi0(&u5500_sdi0_data, periphid);
- db5500_add_sdi0(&u5500_sdi0_data);
+ sdi1_configure();
+ db5500_add_sdi1(&u5500_sdi1_data, periphid);
+ db5500_add_sdi3(&u5500_sdi3_data, periphid);
}
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
index 82025ba70c0..26a26d09d72 100644
--- a/arch/arm/mach-ux500/board-u5500.c
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -1,7 +1,6 @@
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
*/
@@ -9,44 +8,160 @@
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/irq.h>
+#include <linux/gpio/nomadik.h>
#include <linux/i2c.h>
#include <linux/mfd/ab5500/ab5500.h>
+#include <linux/amba/pl022.h>
+#ifdef CONFIG_STM_I2S
+#include <linux/i2s/i2s.h>
+#endif
+#include <linux/led-lm3530.h>
+#include <../drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h>
+#include <linux/input/matrix_keypad.h>
+#ifdef CONFIG_SENSORS_LSM303DLH
+#include <linux/lsm303dlh.h>
+#endif
+#include <linux/leds-ab5500.h>
+#ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+#include <linux/cyttsp.h>
+#endif
+
+#ifdef CONFIG_AV8100
+#include <video/av8100.h>
+#endif
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <plat/pincfg.h>
#include <plat/i2c.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
+#include <mach/ste-dma40-db5500.h>
+#ifdef CONFIG_STM_I2S
+#include <mach/msp.h>
+#endif
#include <mach/devices.h>
#include <mach/setup.h>
+#include <mach/db5500-keypad.h>
+#include <mach/crypto-ux500.h>
+#include <mach/abx500-accdet.h>
+#include <mach/usb.h>
#include "pins-db5500.h"
+#include "pins.h"
#include "devices-db5500.h"
+#include "board-u5500.h"
+#include "board-u5500-bm.h"
+#include "board-u5500-wlan.h"
#include <linux/led-lm3530.h>
+#include "board-ux500-usb.h"
+#ifdef CONFIG_SENSORS_LSM303DLH
/*
- * GPIO
+ * LSM303DLH
*/
-static pin_cfg_t u5500_pins[] = {
- /* I2C */
- GPIO218_I2C2_SCL | PIN_INPUT_PULLUP,
- GPIO219_I2C2_SDA | PIN_INPUT_PULLUP,
+static struct lsm303dlh_platform_data __initdata lsm303dlh_pdata = {
+ .name_a = "lsm303dlh.0",
+ .name_m = "lsm303dlh.1",
+ .axis_map_x = 1,
+ .axis_map_y = 0,
+ .axis_map_z = 2,
+ .negative_x = 0,
+ .negative_y = 0,
+ .negative_z = 1,
+};
+#endif
- /* DISPLAY_ENABLE */
- GPIO226_GPIO | PIN_OUTPUT_LOW,
+/*
+ * Touchscreen
+ */
+static struct synaptics_rmi4_platform_data rmi4_i2c_platformdata = {
+ .irq_number = NOMADIK_GPIO_TO_IRQ(179),
+ .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE) && \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE == 270
+ .x_flip = true,
+ .y_flip = false,
+#else
+ .x_flip = false,
+ .y_flip = true,
+#endif
+ .regulator_en = true,
+};
- /* Backlight Enbale */
- GPIO224_GPIO | PIN_OUTPUT_HIGH,
+static struct av8100_platform_data av8100_plat_data = {
+ .irq = NOMADIK_GPIO_TO_IRQ(223),
+ .reset = 225,
+ .alt_powerupseq = true,
+ .mclk_freq = 1, /* MCLK_RNG_22_27 */
};
+
+/*
+ * leds LM3530
+ */
+static struct lm3530_platform_data u5500_als_platform_data = {
+ .mode = LM3530_BL_MODE_MANUAL,
+ .als_input_mode = LM3530_INPUT_ALS1,
+ .max_current = LM3530_FS_CURR_26mA,
+ .pwm_pol_hi = true,
+ .als_avrg_time = LM3530_ALS_AVRG_TIME_512ms,
+ .brt_ramp_law = 1, /* Linear */
+ .brt_ramp_fall = LM3530_RAMP_TIME_8s,
+ .brt_ramp_rise = LM3530_RAMP_TIME_8s,
+ .als1_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
+ .als2_resistor_sel = LM3530_ALS_IMPD_Z,
+ .als_vmin = 730, /* mV */
+ .als_vmax = 1020, /* mV */
+ .brt_val = 0x7F, /* Max brightness */
+};
+
+
+/* leds-ab5500 */
+static struct ab5500_hvleds_platform_data ab5500_hvleds_data = {
+ .hw_fade = false,
+ .leds = {
+ [0] = {
+ .name = "red",
+ .led_on = true,
+ .led_id = 0,
+ .fade_hi = 255,
+ .fade_lo = 0,
+ .max_current = 10, /* wrong value may damage h/w */
+ },
+ [1] = {
+ .name = "green",
+ .led_on = true,
+ .led_id = 1,
+ .fade_hi = 255,
+ .fade_lo = 0,
+ .max_current = 10, /* wrong value may damage h/w */
+ },
+ [2] = {
+ .name = "blue",
+ .led_on = true,
+ .led_id = 2,
+ .fade_hi = 255,
+ .fade_lo = 0,
+ .max_current = 10, /* wrong value may damage h/w */
+ },
+ },
+};
+
+static struct ab5500_ponkey_platform_data ab5500_ponkey_data = {
+ /*
+ * Shutdown time in seconds. Can be set
+ * to 10 sec, 5 sec or 0 sec (disabled).
+ */
+ .shutdown_secs = 10,
+};
+
/*
* I2C
*/
-#define U5500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
+#define U5500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) \
static struct nmk_i2c_controller u5500_i2c##id##_data = { \
/* \
* slave data setup time, which is \
@@ -61,31 +176,57 @@ static struct nmk_i2c_controller u5500_i2c##id##_data = { \
.rft = _rft, \
/* std. mode operation */ \
.clk_freq = clk, \
+ /* Slave response timeout(ms) */\
+ .timeout = t_out, \
.sm = _sm, \
}
+
/*
- * The board uses TODO <3> i2c controllers, initialize all of
+ * The board uses 3 i2c controllers, initialize all of
* them with slave data setup time of 250 ns,
* Tx & Rx FIFO threshold values as 1 and standard
* mode of operation
*/
-U5500_I2C_CONTROLLER(2, 0xe, 1, 1, 400000, I2C_FREQ_MODE_FAST);
+U5500_I2C_CONTROLLER(1, 0xe, 1, 10, 400000, 200, I2C_FREQ_MODE_FAST);
+U5500_I2C_CONTROLLER(2, 0xe, 1, 10, 400000, 200, I2C_FREQ_MODE_FAST);
+U5500_I2C_CONTROLLER(3, 0xe, 1, 10, 400000, 200, I2C_FREQ_MODE_FAST);
-static struct lm3530_platform_data u5500_als_platform_data = {
- .mode = LM3530_BL_MODE_MANUAL,
- .als_input_mode = LM3530_INPUT_ALS1,
- .max_current = LM3530_FS_CURR_26mA,
- .pwm_pol_hi = true,
- .als_avrg_time = LM3530_ALS_AVRG_TIME_512ms,
- .brt_ramp_law = 1, /* Linear */
- .brt_ramp_fall = LM3530_RAMP_TIME_8s,
- .brt_ramp_rise = LM3530_RAMP_TIME_8s,
- .als1_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
- .als2_resistor_sel = LM3530_ALS_IMPD_Z,
- .als_vmin = 730, /* mV */
- .als_vmax = 1020, /* mV */
- .brt_val = 0x7F, /* Max brightness */
+static struct i2c_board_info __initdata u5500_i2c1_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
+ .platform_data = &rmi4_i2c_platformdata,
+ },
+};
+
+static struct i2c_board_info __initdata u5500v1_i2c2_sensor_devices[] = {
+#ifdef CONFIG_SENSORS_LSM303DLH
+ {
+ /* LSM303DLH Accelerometer */
+ I2C_BOARD_INFO("lsm303dlh_a", 0x19),
+ .platform_data = &lsm303dlh_pdata,
+ },
+ {
+ /* LSM303DLH Magnetometer */
+ I2C_BOARD_INFO("lsm303dlh_m", 0x1E),
+ .platform_data = &lsm303dlh_pdata,
+ },
+#endif
+};
+
+static struct i2c_board_info __initdata u5500v2_i2c2_sensor_devices[] = {
+#ifdef CONFIG_SENSORS_LSM303DLH
+ {
+ /* LSM303DLHC Accelerometer */
+ I2C_BOARD_INFO("lsm303dlhc_a", 0x19),
+ .platform_data = &lsm303dlh_pdata,
+ },
+ {
+ /* LSM303DLH Magnetometer */
+ I2C_BOARD_INFO("lsm303dlh_m", 0x1E),
+ .platform_data = &lsm303dlh_pdata,
+ },
+#endif
};
static struct i2c_board_info __initdata u5500_i2c2_devices[] = {
@@ -94,52 +235,429 @@ static struct i2c_board_info __initdata u5500_i2c2_devices[] = {
I2C_BOARD_INFO("lm3530-led", 0x36),
.platform_data = &u5500_als_platform_data,
},
+ {
+ I2C_BOARD_INFO("av8100", 0x70),
+ .platform_data = &av8100_plat_data,
+ },
};
-static void __init u5500_i2c_init(void)
+/*
+ * Keypad
+ */
+
+#define ROW_PIN_I0 128
+#define ROW_PIN_I1 130
+#define ROW_PIN_I2 132
+#define ROW_PIN_I3 134
+#define COL_PIN_O4 137
+#define COL_PIN_O5 139
+
+static int db5500_kp_rows[] = {
+ ROW_PIN_I0, ROW_PIN_I1, ROW_PIN_I2, ROW_PIN_I3,
+};
+
+static int db5500_kp_cols[] = {
+ COL_PIN_O4, COL_PIN_O5,
+};
+
+static bool db5500_config;
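+
+/*
+ * Request each keypad row GPIO once (tracked by db5500_config) and drive it
+ * high as an output; called for each row from db5500_kp_init().
+ */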
+static int db5500_set_gpio_row(int gpio)
{
- db5500_add_i2c2(&u5500_i2c2_data);
- i2c_register_board_info(2, ARRAY_AND_SIZE(u5500_i2c2_devices));
+ int ret = -1;
+
+ if (!db5500_config) {
+ ret = gpio_request(gpio, "db5500_kpd");
+ if (ret < 0) {
+ pr_err("db5500_set_gpio_row: gpio request failed\n");
+ return ret;
+ }
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret < 0) {
+ pr_err("db5500_set_gpio_row: gpio direction failed\n");
+ gpio_free(gpio);
+ }
+
+ return ret;
+}
+
+static int db5500_kp_init(void)
+{
+ struct ux500_pins *pins;
+ int ret, i;
+
+ pins = ux500_pins_get("db5500_kp");
+ if (pins)
+ ux500_pins_enable(pins);
+
+ for (i = 0; i < ARRAY_SIZE(db5500_kp_rows); i++) {
+ ret = db5500_set_gpio_row(db5500_kp_rows[i]);
+ if (ret < 0) {
+ pr_err("db5500_kp_init: failed init\n");
+ return ret;
+ }
+ }
+
+ if (!db5500_config)
+ db5500_config = true;
+
+ return 0;
+}
+
+static int db5500_kp_exit(void)
+{
+ struct ux500_pins *pins;
+
+ pins = ux500_pins_get("db5500_kp");
+ if (pins)
+ ux500_pins_disable(pins);
+
+ return 0;
+}
+
+static const unsigned int u5500_keymap[] = {
+ KEY(4, 0, KEY_CAMERA), /* Camera2 */
+ KEY(4, 1, KEY_CAMERA_FOCUS), /* Camera1 */
+ KEY(4, 2, KEY_MENU),
+ KEY(4, 3, KEY_BACK),
+ KEY(5, 2, KEY_SEND),
+ KEY(5, 3, KEY_HOME),
+#ifndef CONFIG_INPUT_AB8500_PONKEY
+ /* AB5500 ONSWa is also hooked up to this key */
+ KEY(8, 0, KEY_END),
+#endif
+ KEY(8, 1, KEY_VOLUMEUP),
+ KEY(8, 2, KEY_VOLUMEDOWN),
+};
+
+static struct matrix_keymap_data u5500_keymap_data = {
+ .keymap = u5500_keymap,
+ .keymap_size = ARRAY_SIZE(u5500_keymap),
+};
+
+static struct db5500_keypad_platform_data u5500_keypad_board = {
+ .init = db5500_kp_init,
+ .exit = db5500_kp_exit,
+ .gpio_input_pins = db5500_kp_rows,
+ .gpio_output_pins = db5500_kp_cols,
+ .keymap_data = &u5500_keymap_data,
+ .no_autorepeat = true,
+ .krow = ARRAY_SIZE(db5500_kp_rows),
+ .kcol = ARRAY_SIZE(db5500_kp_cols),
+ .debounce_ms = 40, /* milliseconds */
+ .switch_delay = 200, /* in jiffies */
+};
+
+#ifdef CONFIG_STM_I2S
+/*
+ * MSP
+ */
+
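+/*
+ * MSP_DMA(num, eventline) expands to a pair of logical DMA channel
+ * configurations, msp<num>_dma_rx and msp<num>_dma_tx, wired to the given
+ * DB5500 DMA event lines.
+ */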
+#define MSP_DMA(num, eventline) \
+static struct stedma40_chan_cfg msp##num##_dma_rx = { \
+ .high_priority = true, \
+ .dir = STEDMA40_PERIPH_TO_MEM, \
+ .src_dev_type = eventline##_RX, \
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
+ .src_info.psize = STEDMA40_PSIZE_LOG_4, \
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4, \
+}; \
+ \
+static struct stedma40_chan_cfg msp##num##_dma_tx = { \
+ .high_priority = true, \
+ .dir = STEDMA40_MEM_TO_PERIPH, \
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
+ .dst_dev_type = eventline##_TX, \
+ .src_info.psize = STEDMA40_PSIZE_LOG_4, \
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4, \
}
+MSP_DMA(0, DB5500_DMA_DEV9_MSP0);
+MSP_DMA(1, DB5500_DMA_DEV10_MSP1);
+MSP_DMA(2, DB5500_DMA_DEV11_MSP2);
+
+static struct msp_i2s_platform_data u5500_msp0_data = {
+ .id = MSP_0_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp0_dma_rx,
+ .msp_i2s_dma_tx = &msp0_dma_tx,
+};
+
+static struct msp_i2s_platform_data u5500_msp1_data = {
+ .id = MSP_1_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp1_dma_rx,
+ .msp_i2s_dma_tx = &msp1_dma_tx,
+};
+
+static struct msp_i2s_platform_data u5500_msp2_data = {
+ .id = MSP_2_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp2_dma_rx,
+ .msp_i2s_dma_tx = &msp2_dma_tx,
+};
+
+static struct i2s_board_info stm_i2s_board_info[] __initdata = {
+ {
+ .modalias = "i2s_device.0",
+ .id = 0,
+ .chip_select = 0,
+ },
+ {
+ .modalias = "i2s_device.1",
+ .id = 1,
+ .chip_select = 1,
+ },
+ {
+ .modalias = "i2s_device.2",
+ .id = 2,
+ .chip_select = 2,
+ },
+};
+
+static void __init u5500_msp_init(void)
+{
+ db5500_add_msp0_i2s(&u5500_msp0_data);
+ db5500_add_msp1_i2s(&u5500_msp1_data);
+ db5500_add_msp2_i2s(&u5500_msp2_data);
+
+ i2s_register_board_info(ARRAY_AND_SIZE(stm_i2s_board_info));
+}
+#else
+static void __init u5500_msp_init(void)
+{
+}
+#endif
+
+/*
+ * SPI
+ */
+
+static struct pl022_ssp_controller u5500_spi3_data = {
+ .bus_id = 1,
+ .num_chipselect = 4, /* 3 possible CS lines + 1 for tests */
+};
+
+static void __init u5500_spi_init(void)
+{
+ db5500_add_spi3(&u5500_spi3_data);
+}
+
+static struct resource ab5500_resources[] = {
+ [0] = {
+ .start = IRQ_DB5500_PRCMU_ABB,
+ .end = IRQ_DB5500_PRCMU_ABB,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+static struct abx500_accdet_platform_data ab5500_accdet_pdata = {
+ .btn_keycode = KEY_MEDIA,
+ .accdet1_dbth = ACCDET1_TH_300mV | ACCDET1_DB_10ms,
+ .accdet2122_th = ACCDET21_TH_300mV | ACCDET22_TH_300mV,
+ .is_detection_inverted = false,
+ };
+#endif
+
static struct ab5500_platform_data ab5500_plf_data = {
.irq = {
- .base = 0,
- .count = 0,
+ .base = IRQ_AB5500_BASE,
+ .count = AB5500_NR_IRQS,
},
- .init_settings = NULL,
- .init_settings_sz = 0,
- .pm_power_off = false,
+ .pm_power_off = true,
+ .regulator = &u5500_ab5500_regulator_data,
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+ .dev_data[AB5500_DEVID_ACCDET] = &ab5500_accdet_pdata,
+ .dev_data_sz[AB5500_DEVID_ACCDET] = sizeof(ab5500_accdet_pdata),
+#endif
+ .dev_data[AB5500_DEVID_LEDS] = &ab5500_hvleds_data,
+ .dev_data_sz[AB5500_DEVID_LEDS] = sizeof(ab5500_hvleds_data),
+ .init_settings = (struct abx500_init_settings[]){
+ {
+ .bank = 0x3,
+ .reg = 0x17,
+ .setting = 0x0F,
+ },
+ {
+ .bank = 0x3,
+ .reg = 0x18,
+ .setting = 0x10,
+ },
+ },
+ .init_settings_sz = 2,
+#if defined(CONFIG_AB5500_BM)
+ .dev_data[AB5500_DEVID_CHARGALG] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_CHARGALG] = sizeof(abx500_bm_pt_data),
+ .dev_data[AB5500_DEVID_CHARGER] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_CHARGER] = sizeof(abx500_bm_pt_data),
+ .dev_data[AB5500_DEVID_FG] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_FG] = sizeof(abx500_bm_pt_data),
+ .dev_data[AB5500_DEVID_BTEMP] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_BTEMP] = sizeof(abx500_bm_pt_data),
+#endif
+ .dev_data[AB5500_DEVID_ONSWA] = &ab5500_ponkey_data,
+ .dev_data_sz[AB5500_DEVID_ONSWA] = sizeof(ab5500_ponkey_data),
+ .dev_data[AB5500_DEVID_USB] = &abx500_usbgpio_plat_data,
+ .dev_data_sz[AB5500_DEVID_USB] = sizeof(abx500_usbgpio_plat_data),
};
-static struct platform_device ab5500_device = {
+static struct platform_device u5500_ab5500_device = {
.name = "ab5500-core",
.id = 0,
.dev = {
.platform_data = &ab5500_plf_data,
},
+ .num_resources = 1,
+ .resource = ab5500_resources,
+};
+
+static struct platform_device u5500_mloader_device = {
+ .name = "db5500_mloader",
+ .id = -1,
.num_resources = 0,
};
+
+static struct cryp_platform_data u5500_cryp1_platform_data = {
+ .mem_to_engine = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV48_CRYPTO1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ },
+ .engine_to_mem = {
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV48_CRYPTO1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ }
+};
+
+static struct hash_platform_data u5500_hash1_platform_data = {
+ .mem_to_engine = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV50_HASH1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_16,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_16,
+ },
+};
+
static struct platform_device *u5500_platform_devices[] __initdata = {
- &ab5500_device,
+ &u5500_ab5500_device,
+#ifdef CONFIG_FB_MCDE
+ &u5500_mcde_device,
+#endif
+ &ux500_hwmem_device,
+ &u5500_b2r2_device,
+ &u5500_mloader_device,
+#ifdef CONFIG_U5500_MMIO
+ &u5500_mmio_device,
+#endif
+ &u5500_thsens_device,
};
+
+/*
+ * This function checks whether this is the small S5500 board:
+ * GPIO0 is HIGH on S5500.
+ */
+bool is_s5500_board(void)
+{
+ int err, val;
+
+ err = gpio_request(GPIO_BOARD_VERSION, "Board Version");
+ if (err) {
+ pr_err("Error %d while requesting GPIO for Board Version\n",
+ err);
+ return err;
+ }
+
+ err = gpio_direction_input(GPIO_BOARD_VERSION);
+ if (err) {
+ pr_err("Error %d while setting GPIO for Board Version"
+ "output mode\n", err);
+ return err;
+ }
+
+ val = gpio_get_value(GPIO_BOARD_VERSION);
+
+ gpio_free(GPIO_BOARD_VERSION);
+
+ return (val == 1);
+}
+
+static void __init u5500_i2c_init(void)
+{
+ db5500_add_i2c1(&u5500_i2c1_data);
+ db5500_add_i2c2(&u5500_i2c2_data);
+ db5500_add_i2c3(&u5500_i2c3_data);
+
+ i2c_register_board_info(1, ARRAY_AND_SIZE(u5500_i2c1_devices));
+ i2c_register_board_info(2, ARRAY_AND_SIZE(u5500_i2c2_devices));
+
+ if (cpu_is_u5500v1())
+ i2c_register_board_info(2, ARRAY_AND_SIZE(u5500v1_i2c2_sensor_devices));
+
+ if (cpu_is_u5500v2()) {
+ /*
+ * In V2 the display is mounted in the reverse direction,
+ * so we need to change the initial settings of the
+ * accelerometer and magnetometer.
+ */
+ lsm303dlh_pdata.negative_x = 1;
+ lsm303dlh_pdata.negative_y = 1;
+ i2c_register_board_info(2, ARRAY_AND_SIZE(u5500v2_i2c2_sensor_devices));
+ }
+}
+
static void __init u5500_uart_init(void)
{
db5500_add_uart0(NULL);
db5500_add_uart1(NULL);
db5500_add_uart2(NULL);
+ db5500_add_uart3(NULL);
+}
+
+static void __init u5500_cryp1_hash1_init(void)
+{
+ db5500_add_cryp1(&u5500_cryp1_platform_data);
+ db5500_add_hash1(&u5500_hash1_platform_data);
}
static void __init u5500_init_machine(void)
{
+ u5500_regulators_init();
u5500_init_devices();
- nmk_config_pins(u5500_pins, ARRAY_SIZE(u5500_pins));
+ u5500_pins_init();
+
u5500_i2c_init();
+ u5500_msp_init();
+ u5500_spi_init();
+
u5500_sdi_init();
u5500_uart_init();
+ u5500_wlan_init();
+
+ db5500_add_keypad(&u5500_keypad_board);
+ u5500_cryp1_hash1_init();
+
+#ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+ u5500_cyttsp_init();
+#endif
+
platform_add_devices(u5500_platform_devices,
ARRAY_SIZE(u5500_platform_devices));
}
@@ -151,3 +669,11 @@ MACHINE_START(U5500, "ST-Ericsson U5500 Platform")
.timer = &ux500_timer,
.init_machine = u5500_init_machine,
MACHINE_END
+
+MACHINE_START(B5500, "ST-Ericsson U5500 Big Board")
+ .atag_offset = 0x00000100,
+ .map_io = u5500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
+ .init_machine = u5500_init_machine,
+MACHINE_END
diff --git a/arch/arm/mach-ux500/board-u5500.h b/arch/arm/mach-ux500/board-u5500.h
new file mode 100644
index 00000000000..85762e5efb1
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_U5500_H
+#define __BOARD_U5500_H
+
+#define GPIO_SDMMC_CD 180
+#define GPIO_MMC_CARD_CTRL 227
+#define GPIO_MMC_CARD_VSEL 185
+#define GPIO_BOARD_VERSION 0
+#define GPIO_PRIMARY_CAM_XSHUTDOWN 1
+#define GPIO_SECONDARY_CAM_XSHUTDOWN 2
+#define GPIO_CAMERA_PMIC_EN 212
+#define CYPRESS_TOUCH_INT_PIN 179
+#define CYPRESS_TOUCH_RST_GPIO 135
+#define CYPRESS_SLAVE_SELECT_GPIO 186
+
+struct ab5500_regulator_platform_data;
+extern struct ab5500_regulator_platform_data u5500_ab5500_regulator_data;
+
+extern void u5500_pins_init(void);
+extern void __init u5500_regulators_init(void);
+void u5500_cyttsp_init(void);
+bool is_s5500_board(void);
+
+#endif
diff --git a/arch/arm/mach-ux500/board-ux500-usb.h b/arch/arm/mach-ux500/board-ux500-usb.h
new file mode 100644
index 00000000000..6b35a181c0a
--- /dev/null
+++ b/arch/arm/mach-ux500/board-ux500-usb.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Saketh Ram Bommisetti <sakethram.bommisetti@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_UX500_USB_H
+#define __BOARD_UX500_USB_H
+
+extern struct abx500_usbgpio_platform_data abx500_usbgpio_plat_data;
+
+#endif
diff --git a/arch/arm/mach-ux500/clock-db5500.c b/arch/arm/mach-ux500/clock-db5500.c
new file mode 100644
index 00000000000..163c48d9f24
--- /dev/null
+++ b/arch/arm/mach-ux500/clock-db5500.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson SA
+ * Copyright (C) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <plat/pincfg.h>
+
+#include <mach/hardware.h>
+
+#include "clock.h"
+#include "pins-db5500.h"
+
+static DEFINE_MUTEX(sysclk_mutex);
+static DEFINE_MUTEX(pll_mutex);
+
+/* SysClk operations. */
+static int sysclk_enable(struct clk *clk)
+{
+ return prcmu_request_clock(PRCMU_SYSCLK, true);
+}
+
+static void sysclk_disable(struct clk *clk)
+{
+ prcmu_request_clock(PRCMU_SYSCLK, false);
+}
+
+static struct clkops sysclk_ops = {
+ .enable = sysclk_enable,
+ .disable = sysclk_disable,
+};
+
+static int rtc_clk_enable(struct clk *clk)
+{
+ return ab5500_clock_rtc_enable(clk->cg_sel, true);
+}
+
+static void rtc_clk_disable(struct clk *clk)
+{
+ int ret = ab5500_clock_rtc_enable(clk->cg_sel, false);
+
+ if (ret)
+ pr_err("clock: %s failed to disable: %d\n", clk->name, ret);
+}
+
+static struct clkops rtc_clk_ops = {
+ .enable = rtc_clk_enable,
+ .disable = rtc_clk_disable,
+};
+
+static pin_cfg_t clkout0_pins[] = {
+ GPIO161_CLKOUT_0 | PIN_OUTPUT_LOW,
+};
+
+static pin_cfg_t clkout1_pins[] = {
+ GPIO162_CLKOUT_1 | PIN_OUTPUT_LOW,
+};
+
+static int clkout0_enable(struct clk *clk)
+{
+ return nmk_config_pins(clkout0_pins, ARRAY_SIZE(clkout0_pins));
+}
+
+static void clkout0_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pins_sleep(clkout0_pins, ARRAY_SIZE(clkout0_pins));
+ if (!r)
+ return;
+
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static int clkout1_enable(struct clk *clk)
+{
+ return nmk_config_pins(clkout1_pins, ARRAY_SIZE(clkout1_pins));
+}
+
+static void clkout1_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pins_sleep(clkout1_pins, ARRAY_SIZE(clkout1_pins));
+ if (!r)
+ return;
+
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static struct clkops clkout0_ops = {
+ .enable = clkout0_enable,
+ .disable = clkout0_disable,
+};
+
+static struct clkops clkout1_ops = {
+ .enable = clkout1_enable,
+ .disable = clkout1_disable,
+};
+
+#define DEF_PER1_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST1_BASE, _cg_bit, &per1clk)
+#define DEF_PER2_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST2_BASE, _cg_bit, &per2clk)
+#define DEF_PER3_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST3_BASE, _cg_bit, &per3clk)
+#define DEF_PER5_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST5_BASE, _cg_bit, &per5clk)
+#define DEF_PER6_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST6_BASE, _cg_bit, &per6clk)
+
+#define DEF_PER1_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST1_BASE, _cg_bit, _parent, &per1clk)
+#define DEF_PER2_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST2_BASE, _cg_bit, _parent, &per2clk)
+#define DEF_PER3_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST3_BASE, _cg_bit, _parent, &per3clk)
+#define DEF_PER5_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST5_BASE, _cg_bit, _parent, &per5clk)
+#define DEF_PER6_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST6_BASE, _cg_bit, _parent, &per6clk)
+
+/* Clock sources. */
+
+static struct clk soc0_pll = {
+ .name = "soc0_pll",
+ .ops = &prcmu_clk_ops,
+ .mutex = &pll_mutex,
+ .cg_sel = PRCMU_PLLSOC0,
+};
+
+static struct clk soc1_pll = {
+ .name = "soc1_pll",
+ .ops = &prcmu_clk_ops,
+ .mutex = &pll_mutex,
+ .cg_sel = PRCMU_PLLSOC1,
+};
+
+static struct clk ddr_pll = {
+ .name = "ddr_pll",
+ .ops = &prcmu_clk_ops,
+ .mutex = &pll_mutex,
+ .cg_sel = PRCMU_PLLDDR,
+};
+
+static struct clk sysclk = {
+ .name = "sysclk",
+ .ops = &sysclk_ops,
+ .rate = 26000000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk rtc32k = {
+ .name = "rtc32k",
+ .rate = 32768,
+};
+
+static struct clk kbd32k = {
+ .name = "kbd32k",
+ .rate = 32768,
+};
+
+static struct clk clk_dummy = {
+ .name = "dummy",
+};
+
+static struct clk rtc_clk1 = {
+ .name = "rtc_clk1",
+ .ops = &rtc_clk_ops,
+ .cg_sel = 1,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk clkout0 = {
+ .name = "clkout0",
+ .ops = &clkout0_ops,
+ .parent = &sysclk,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk clkout1 = {
+ .name = "clkout1",
+ .ops = &clkout1_ops,
+ .parent = &sysclk,
+ .mutex = &sysclk_mutex,
+};
+
+static DEFINE_MUTEX(parented_prcmu_mutex);
+
+#define DEF_PRCMU_CLK_PARENT(_name, _cg_sel, _rate, _parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ .parent = _parent, \
+ .mutex = &parented_prcmu_mutex, \
+ }
+
+static DEFINE_MUTEX(prcmu_client_mutex);
+
+#define DEF_PRCMU_CLIENT_CLK(_name, _cg_sel, _rate) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ .mutex = &prcmu_client_mutex, \
+ }
+
+static DEF_PRCMU_CLK(dmaclk, PRCMU_DMACLK, 200000000);
+static DEF_PRCMU_CLK(b2r2clk, PRCMU_B2R2CLK, 200000000);
+static DEF_PRCMU_CLK(sgaclk, PRCMU_SGACLK, 199900000);
+static DEF_PRCMU_CLK(uartclk, PRCMU_UARTCLK, 36360000);
+static DEF_PRCMU_CLK(msp02clk, PRCMU_MSP02CLK, 13000000);
+static DEF_PRCMU_CLIENT_CLK(msp1clk, PRCMU_MSP1CLK, 26000000);
+static DEF_PRCMU_CLIENT_CLK(cdclk, PRCMU_CDCLK, 26000000);
+static DEF_PRCMU_CLK(i2cclk, PRCMU_I2CCLK, 24000000);
+static DEF_PRCMU_CLK(irdaclk, PRCMU_IRDACLK, 48000000);
+static DEF_PRCMU_CLK(irrcclk, PRCMU_IRRCCLK, 48000000);
+static DEF_PRCMU_CLK(rngclk, PRCMU_RNGCLK, 26000000);
+static DEF_PRCMU_CLK(pwmclk, PRCMU_PWMCLK, 26000000);
+static DEF_PRCMU_CLK(sdmmcclk, PRCMU_SDMMCCLK, 50000000);
+static DEF_PRCMU_CLK(spare1clk, PRCMU_SPARE1CLK, 50000000);
+static DEF_PRCMU_CLK(per1clk, PRCMU_PER1CLK, 133330000);
+static DEF_PRCMU_CLK(per2clk, PRCMU_PER2CLK, 133330000);
+static DEF_PRCMU_CLK(per3clk, PRCMU_PER3CLK, 133330000);
+static DEF_PRCMU_CLK(per5clk, PRCMU_PER5CLK, 133330000);
+static DEF_PRCMU_CLK(per6clk, PRCMU_PER6CLK, 133330000);
+static DEF_PRCMU_CLK(hdmiclk, PRCMU_HDMICLK, 26000000);
+static DEF_PRCMU_CLK(apeatclk, PRCMU_APEATCLK, 200000000);
+static DEF_PRCMU_CLK(apetraceclk, PRCMU_APETRACECLK, 266000000);
+static DEF_PRCMU_CLK(mcdeclk, PRCMU_MCDECLK, 160000000);
+static DEF_PRCMU_CLK(tvclk, PRCMU_TVCLK, 40000000);
+static DEF_PRCMU_CLK(dsialtclk, PRCMU_DSIALTCLK, 400000000);
+static DEF_PRCMU_CLK(timclk, PRCMU_TIMCLK, 3250000);
+static DEF_PRCMU_CLK_PARENT(svaclk, PRCMU_SVACLK, 156000000, &soc1_pll);
+static DEF_PRCMU_CLK(siaclk, PRCMU_SIACLK, 133330000);
+
+/* PRCC PClocks */
+
+static DEF_PER1_PCLK(0, p1_pclk0);
+static DEF_PER1_PCLK(1, p1_pclk1);
+static DEF_PER1_PCLK(2, p1_pclk2);
+static DEF_PER1_PCLK(3, p1_pclk3);
+static DEF_PER1_PCLK(4, p1_pclk4);
+static DEF_PER1_PCLK(5, p1_pclk5);
+static DEF_PER1_PCLK(6, p1_pclk6);
+
+static DEF_PER2_PCLK(0, p2_pclk0);
+static DEF_PER2_PCLK(1, p2_pclk1);
+
+static DEF_PER3_PCLK(0, p3_pclk0);
+static DEF_PER3_PCLK(1, p3_pclk1);
+static DEF_PER3_PCLK(2, p3_pclk2);
+
+static DEF_PER5_PCLK(0, p5_pclk0);
+static DEF_PER5_PCLK(1, p5_pclk1);
+static DEF_PER5_PCLK(2, p5_pclk2);
+static DEF_PER5_PCLK(3, p5_pclk3);
+static DEF_PER5_PCLK(4, p5_pclk4);
+static DEF_PER5_PCLK(5, p5_pclk5);
+static DEF_PER5_PCLK(6, p5_pclk6);
+static DEF_PER5_PCLK(7, p5_pclk7);
+static DEF_PER5_PCLK(8, p5_pclk8);
+static DEF_PER5_PCLK(9, p5_pclk9);
+static DEF_PER5_PCLK(10, p5_pclk10);
+static DEF_PER5_PCLK(11, p5_pclk11);
+static DEF_PER5_PCLK(12, p5_pclk12);
+static DEF_PER5_PCLK(13, p5_pclk13);
+static DEF_PER5_PCLK(14, p5_pclk14);
+static DEF_PER5_PCLK(15, p5_pclk15);
+
+static DEF_PER6_PCLK(0, p6_pclk0);
+static DEF_PER6_PCLK(1, p6_pclk1);
+static DEF_PER6_PCLK(2, p6_pclk2);
+static DEF_PER6_PCLK(3, p6_pclk3);
+static DEF_PER6_PCLK(4, p6_pclk4);
+static DEF_PER6_PCLK(5, p6_pclk5);
+static DEF_PER6_PCLK(6, p6_pclk6);
+static DEF_PER6_PCLK(7, p6_pclk7);
+
+/* MSP0 */
+static DEF_PER1_KCLK(0, p1_msp0_kclk, &msp02clk);
+static DEF_PER_CLK(p1_msp0_clk, &p1_pclk0, &p1_msp0_kclk);
+
+/* SDI0 */
+static DEF_PER1_KCLK(1, p1_sdi0_kclk, &spare1clk); /* &sdmmcclk on v1 */
+static DEF_PER_CLK(p1_sdi0_clk, &p1_pclk1, &p1_sdi0_kclk);
+
+/* SDI2 */
+static DEF_PER1_KCLK(2, p1_sdi2_kclk, &sdmmcclk);
+static DEF_PER_CLK(p1_sdi2_clk, &p1_pclk2, &p1_sdi2_kclk);
+
+/* UART0 */
+static DEF_PER1_KCLK(3, p1_uart0_kclk, &uartclk);
+static DEF_PER_CLK(p1_uart0_clk, &p1_pclk3, &p1_uart0_kclk);
+
+/* I2C1 */
+static DEF_PER1_KCLK(4, p1_i2c1_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c1_clk, &p1_pclk4, &p1_i2c1_kclk);
+
+/* PWM */
+static DEF_PER3_KCLK(0, p3_pwm_kclk, &pwmclk);
+static DEF_PER_CLK(p3_pwm_clk, &p3_pclk1, &p3_pwm_kclk);
+
+/* KEYPAD */
+static DEF_PER3_KCLK(0, p3_keypad_kclk, &kbd32k);
+static DEF_PER_CLK(p3_keypad_clk, &p3_pclk0, &p3_keypad_kclk);
+
+/* MSP2 */
+static DEF_PER5_KCLK(0, p5_msp2_kclk, &msp02clk);
+static DEF_PER_CLK(p5_msp2_clk, &p5_pclk0, &p5_msp2_kclk);
+
+/* UART1 */
+static DEF_PER5_KCLK(1, p5_uart1_kclk, &uartclk);
+static DEF_PER_CLK(p5_uart1_clk, &p5_pclk1, &p5_uart1_kclk);
+
+/* UART2 */
+static DEF_PER5_KCLK(2, p5_uart2_kclk, &uartclk);
+static DEF_PER_CLK(p5_uart2_clk, &p5_pclk2, &p5_uart2_kclk);
+
+/* UART3 */
+static DEF_PER5_KCLK(3, p5_uart3_kclk, &uartclk);
+static DEF_PER_CLK(p5_uart3_clk, &p5_pclk3, &p5_uart3_kclk);
+
+/* SDI1 */
+static DEF_PER5_KCLK(4, p5_sdi1_kclk, &sdmmcclk);
+static DEF_PER_CLK(p5_sdi1_clk, &p5_pclk4, &p5_sdi1_kclk);
+
+/* SDI3 */
+static DEF_PER5_KCLK(5, p5_sdi3_kclk, &sdmmcclk);
+static DEF_PER_CLK(p5_sdi3_clk, &p5_pclk5, &p5_sdi3_kclk);
+
+/* SDI4 */
+static DEF_PER5_KCLK(6, p5_sdi4_kclk, &sdmmcclk);
+static DEF_PER_CLK(p5_sdi4_clk, &p5_pclk6, &p5_sdi4_kclk);
+
+/* I2C2 */
+static DEF_PER5_KCLK(7, p5_i2c2_kclk, &i2cclk);
+static DEF_PER_CLK(p5_i2c2_clk, &p5_pclk7, &p5_i2c2_kclk);
+
+/* I2C3 */
+static DEF_PER5_KCLK(8, p5_i2c3_kclk, &i2cclk);
+static DEF_PER_CLK(p5_i2c3_clk, &p5_pclk8, &p5_i2c3_kclk);
+
+/* IRRC */
+static DEF_PER5_KCLK(9, p5_irrc_kclk, &irrcclk);
+static DEF_PER_CLK(p5_irrc_clk, &p5_pclk9, &p5_irrc_kclk);
+
+/* IRDA */
+static DEF_PER5_KCLK(10, p5_irda_kclk, &irdaclk);
+static DEF_PER_CLK(p5_irda_clk, &p5_pclk10, &p5_irda_kclk);
+
+/* RNG */
+static DEF_PER6_KCLK(0, p6_rng_kclk, &rngclk);
+static DEF_PER_CLK(p6_rng_clk, &p6_pclk0, &p6_rng_kclk);
+
+/* MTU:S */
+
+/* MTU0 */
+static DEF_PER_CLK(p6_mtu0_clk, &p6_pclk6, &timclk);
+
+/* MTU1 */
+static DEF_PER_CLK(p6_mtu1_clk, &p6_pclk7, &timclk);
+
+static struct clk *db5500_dbg_clks[] __initdata = {
+ /* Clock sources */
+ &soc0_pll,
+ &soc1_pll,
+ &ddr_pll,
+ &sysclk,
+ &rtc32k,
+
+ /* PRCMU clocks */
+ &sgaclk,
+ &siaclk,
+ &svaclk,
+ &uartclk,
+ &msp02clk,
+ &msp1clk,
+ &cdclk,
+ &i2cclk,
+ &irdaclk,
+ &irrcclk,
+ &sdmmcclk,
+ &spare1clk,
+ &per1clk,
+ &per2clk,
+ &per3clk,
+ &per5clk,
+ &per6clk,
+ &hdmiclk,
+ &apeatclk,
+ &apetraceclk,
+ &mcdeclk,
+ &dsialtclk,
+ &dmaclk,
+ &b2r2clk,
+ &tvclk,
+ &rngclk,
+ &pwmclk,
+
+ /* PRCC clocks */
+ &p1_pclk0,
+ &p1_pclk1,
+ &p1_pclk2,
+ &p1_pclk3,
+ &p1_pclk4,
+ &p1_pclk5,
+ &p1_pclk6,
+
+ &p2_pclk0,
+ &p2_pclk1,
+
+ &p3_pclk0,
+ &p3_pclk1,
+ &p3_pclk2,
+
+ &p5_pclk0,
+ &p5_pclk1,
+ &p5_pclk2,
+ &p5_pclk3,
+ &p5_pclk4,
+ &p5_pclk5,
+ &p5_pclk6,
+ &p5_pclk7,
+ &p5_pclk8,
+ &p5_pclk9,
+ &p5_pclk10,
+ &p5_pclk11,
+ &p5_pclk12,
+ &p5_pclk13,
+ &p5_pclk14,
+ &p5_pclk15,
+
+ &p6_pclk0,
+ &p6_pclk1,
+ &p6_pclk2,
+ &p6_pclk3,
+ &p6_pclk4,
+ &p6_pclk5,
+ &p6_pclk6,
+ &p6_pclk7,
+
+ /* Clock sources */
+ &clkout0,
+ &clkout1,
+ &rtc_clk1,
+};
+
+static struct clk_lookup u8500_common_clock_sources[] = {
+ CLK_LOOKUP(soc0_pll, NULL, "soc0_pll"),
+ CLK_LOOKUP(soc1_pll, NULL, "soc1_pll"),
+ CLK_LOOKUP(ddr_pll, NULL, "ddr_pll"),
+ CLK_LOOKUP(sysclk, NULL, "sysclk"),
+ CLK_LOOKUP(rtc32k, NULL, "clk32k"),
+};
+
+static struct clk_lookup db5500_prcmu_clocks[] = {
+ CLK_LOOKUP(sgaclk, "mali", NULL),
+ CLK_LOOKUP(siaclk, "mmio_camera", "sia"),
+ CLK_LOOKUP(svaclk, "hva", NULL),
+ CLK_LOOKUP(uartclk, "UART", NULL),
+ CLK_LOOKUP(msp02clk, "MSP02", NULL),
+ CLK_LOOKUP(msp1clk, "MSP_I2S.1", NULL),
+ CLK_LOOKUP(cdclk, "cable_detect.0", NULL),
+ CLK_LOOKUP(i2cclk, "I2C", NULL),
+ CLK_LOOKUP(sdmmcclk, "sdmmc", NULL),
+ CLK_LOOKUP(per1clk, "PERIPH1", NULL),
+ CLK_LOOKUP(per2clk, "PERIPH2", NULL),
+ CLK_LOOKUP(per3clk, "PERIPH3", NULL),
+ CLK_LOOKUP(per5clk, "PERIPH5", NULL),
+ CLK_LOOKUP(per6clk, "PERIPH6", NULL),
+ CLK_LOOKUP(hdmiclk, "mcde", "hdmi"),
+ CLK_LOOKUP(apeatclk, "apeat", NULL),
+ CLK_LOOKUP(apetraceclk, "apetrace", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", "mcde"),
+ CLK_LOOKUP(dmaclk, "dma40.0", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2_bus", NULL),
+ CLK_LOOKUP(b2r2clk, "U8500-B2R2.0", NULL),
+ CLK_LOOKUP(tvclk, "tv", NULL),
+ CLK_LOOKUP(tvclk, "mcde", "tv"),
+};
+
+static struct clk_lookup db5500_prcc_clocks[] = {
+ CLK_LOOKUP(p1_msp0_clk, "MSP_I2S.0", NULL),
+ CLK_LOOKUP(p1_sdi0_clk, "sdi0", NULL),
+ CLK_LOOKUP(p1_sdi2_clk, "sdi2", NULL),
+ CLK_LOOKUP(p1_uart0_clk, "uart0", NULL),
+ CLK_LOOKUP(p1_i2c1_clk, "nmk-i2c.1", NULL),
+ CLK_LOOKUP(p1_pclk5, "gpio.0", NULL),
+ CLK_LOOKUP(p1_pclk5, "gpio.1", NULL),
+ CLK_LOOKUP(p1_pclk6, "fsmc", NULL),
+
+ CLK_LOOKUP(p2_pclk0, "musb-ux500.0", "usb"),
+ CLK_LOOKUP(p2_pclk1, "gpio.2", NULL),
+
+ CLK_LOOKUP(p3_keypad_clk, "db5500-keypad", NULL),
+ CLK_LOOKUP(p3_pwm_clk, "pwm", NULL),
+ CLK_LOOKUP(p3_pclk2, "gpio.4", NULL),
+
+ CLK_LOOKUP(p5_msp2_clk, "MSP_I2S.2", NULL),
+ CLK_LOOKUP(p5_uart1_clk, "uart1", NULL),
+ CLK_LOOKUP(p5_uart2_clk, "uart2", NULL),
+ CLK_LOOKUP(p5_uart3_clk, "uart3", NULL),
+ CLK_LOOKUP(p5_sdi1_clk, "sdi1", NULL),
+ CLK_LOOKUP(p5_sdi3_clk, "sdi3", NULL),
+ CLK_LOOKUP(p5_sdi4_clk, "sdi4", NULL),
+ CLK_LOOKUP(p5_i2c2_clk, "nmk-i2c.2", NULL),
+ CLK_LOOKUP(p5_i2c3_clk, "nmk-i2c.3", NULL),
+ CLK_LOOKUP(p5_irrc_clk, "irrc", NULL),
+ CLK_LOOKUP(p5_irda_clk, "irda", NULL),
+ CLK_LOOKUP(p5_pclk11, "spi0", NULL),
+ CLK_LOOKUP(p5_pclk12, "spi1", NULL),
+ CLK_LOOKUP(p5_pclk13, "spi2", NULL),
+ CLK_LOOKUP(p5_pclk14, "spi3", NULL),
+ CLK_LOOKUP(p5_pclk15, "gpio.5", NULL),
+ CLK_LOOKUP(p5_pclk15, "gpio.6", NULL),
+ CLK_LOOKUP(p5_pclk15, "gpio.7", NULL),
+
+ CLK_LOOKUP(p6_rng_clk, "rng", NULL),
+ CLK_LOOKUP(p6_pclk1, "cryp0", NULL),
+ CLK_LOOKUP(p6_pclk2, "hash0", NULL),
+ CLK_LOOKUP(p6_pclk3, "pka", NULL),
+ CLK_LOOKUP(p6_pclk4, "hash1", NULL),
+ CLK_LOOKUP(p6_pclk1, "cryp1", NULL),
+ CLK_LOOKUP(p6_pclk5, "cfgreg", NULL),
+ CLK_LOOKUP(p6_mtu0_clk, "mtu0", NULL),
+ CLK_LOOKUP(p6_mtu1_clk, "mtu1", NULL),
+
+ /*
+ * Dummy clock sets up the GPIOs.
+ */
+ CLK_LOOKUP(clk_dummy, "gpio.3", NULL),
+};
+
+static struct clk_lookup db5500_clkouts[] = {
+ CLK_LOOKUP(clkout1, "mmio_camera", "primary-cam"),
+ CLK_LOOKUP(clkout1, "mmio_camera", "secondary-cam"),
+};
+
+static struct clk_lookup u5500_clocks[] = {
+ CLK_LOOKUP(rtc_clk1, "cg2900-uart.0", "lpoclk"),
+};
+
+static const char *db5500_boot_clk[] __initdata = {
+ "spi0",
+ "spi1",
+ "spi2",
+ "spi3",
+ "uart0",
+ "uart1",
+ "uart2",
+ "uart3",
+ "sdi0",
+ "sdi1",
+ "sdi2",
+ "sdi3",
+ "sdi4",
+};
+
+static struct clk *boot_clks[ARRAY_SIZE(db5500_boot_clk)] __initdata;
+
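+/*
+ * db5500_boot_clk_enable() takes a reference on each of these clocks during
+ * clock init, keeping clocks the boot loader left running (UARTs, SDI, SPI)
+ * ungated; the references are dropped again from the late_initcall below.
+ */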
+static int __init db5500_boot_clk_disable(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(db5500_boot_clk); i++) {
+ clk_disable(boot_clks[i]);
+ clk_put(boot_clks[i]);
+ }
+
+ return 0;
+}
+late_initcall_sync(db5500_boot_clk_disable);
+
+static void __init db5500_boot_clk_enable(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(db5500_boot_clk); i++) {
+ boot_clks[i] = clk_get_sys(db5500_boot_clk[i], NULL);
+ BUG_ON(IS_ERR(boot_clks[i]));
+ clk_enable(boot_clks[i]);
+ }
+}
+
+static void configure_clkouts(void)
+{
+ /* div parameter does not matter for sel0 REF_CLK */
+ WARN_ON(prcmu_config_clkout(DB5500_CLKOUT0,
+ DB5500_CLKOUT_REF_CLK_SEL0, 0));
+ WARN_ON(prcmu_config_clkout(DB5500_CLKOUT1,
+ DB5500_CLKOUT_REF_CLK_SEL0, 0));
+}
+
+static struct clk *db5500_clks_tobe_disabled[] __initdata = {
+ &siaclk,
+ &sgaclk,
+ &sdmmcclk,
+ &p1_pclk0,
+ &p1_pclk6,
+ &p3_keypad_clk,
+ &p3_pclk1,
+ &p5_pclk0,
+ &p5_pclk11,
+ &p5_pclk12,
+ &p5_pclk13,
+ &p5_pclk14,
+ &p6_pclk4,
+ &p6_pclk5,
+ &p6_pclk7,
+ &p5_uart1_clk,
+ &p5_uart2_clk,
+ &p5_uart3_clk,
+ &p5_sdi1_clk,
+ &p5_sdi3_clk,
+ &p5_sdi4_clk,
+ &p5_i2c3_clk,
+ &p5_irrc_clk,
+ &p5_irda_clk,
+};
+
+static int __init init_clock_states(void)
+{
+ int i = 0;
+ /*
+ * The following clks are shared with secure world.
+ * Currently this leads to a limitation where we need to
+ * enable them at all times.
+ */
+ clk_enable(&p6_pclk1);
+ clk_enable(&p6_pclk2);
+ clk_enable(&p6_pclk3);
+ clk_enable(&p6_rng_clk);
+
+ /*
+ * Disable clocks that are on at boot, but should be off.
+ */
+ for (i = 0; i < ARRAY_SIZE(db5500_clks_tobe_disabled); i++) {
+ if (!clk_enable(db5500_clks_tobe_disabled[i]))
+ clk_disable(db5500_clks_tobe_disabled[i]);
+ }
+ return 0;
+}
+late_initcall(init_clock_states);
+
+int __init db5500_clk_init(void)
+{
+ if (ux500_is_svp()) {
+ prcmu_clk_ops.enable = NULL;
+ prcmu_clk_ops.disable = NULL;
+ prcc_pclk_ops.enable = NULL;
+ prcc_pclk_ops.disable = NULL;
+ prcc_kclk_ops.enable = NULL;
+ prcc_kclk_ops.disable = NULL;
+ }
+ prcmu_clk_ops.get_rate = NULL;
+
+ if (cpu_is_u5500v1())
+ p1_sdi0_kclk.parent = &sdmmcclk;
+
+ clkdev_add_table(u8500_common_clock_sources,
+ ARRAY_SIZE(u8500_common_clock_sources));
+
+ clkdev_add_table(db5500_prcmu_clocks, ARRAY_SIZE(db5500_prcmu_clocks));
+ clkdev_add_table(db5500_prcc_clocks, ARRAY_SIZE(db5500_prcc_clocks));
+ clkdev_add_table(db5500_clkouts, ARRAY_SIZE(db5500_clkouts));
+ clkdev_add_table(u5500_clocks, ARRAY_SIZE(u5500_clocks));
+
+ db5500_boot_clk_enable();
+
+ /*
+ * The following clks are shared with secure world.
+ * Currently this leads to a limitation where we need to
+ * enable them at all times.
+ */
+ clk_enable(&p6_pclk1);
+ clk_enable(&p6_pclk2);
+ clk_enable(&p6_pclk3);
+ clk_enable(&p6_rng_clk);
+
+ configure_clkouts();
+
+ return 0;
+}
+
+int __init db5500_clk_debug_init(void)
+{
+ return dbx500_clk_debug_init(db5500_dbg_clks,
+ ARRAY_SIZE(db5500_dbg_clks));
+}
diff --git a/arch/arm/mach-ux500/clock-db8500.c b/arch/arm/mach-ux500/clock-db8500.c
new file mode 100644
index 00000000000..f03d8361157
--- /dev/null
+++ b/arch/arm/mach-ux500/clock-db8500.c
@@ -0,0 +1,1056 @@
+/*
+ * Copyright (C) 2009-2011 ST-Ericsson SA
+ * Copyright (C) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/mfd/ab8500/sysctrl.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/consumer.h>
+
+#include <plat/pincfg.h>
+
+#include <mach/hardware.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "clock.h"
+#include "pins-db8500.h"
+#include "product.h"
+
+static DEFINE_MUTEX(soc1_pll_mutex);
+static DEFINE_MUTEX(sysclk_mutex);
+static DEFINE_MUTEX(ab_ulpclk_mutex);
+static DEFINE_MUTEX(ab_intclk_mutex);
+static DEFINE_MUTEX(clkout0_mutex);
+
+static struct delayed_work sysclk_disable_work;
+
+/* PLL operations. */
+
+static unsigned long pll_get_rate(struct clk *clk)
+{
+ return prcmu_clock_rate(clk->cg_sel);
+}
+
+static struct clkops pll_ops = {
+ .get_rate = pll_get_rate,
+};
+
+/* SysClk operations. */
+
+static int request_sysclk(bool enable)
+{
+ static int requests;
+
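+ /*
+ * Reference count SysClk users so that only the first enable and the
+ * last disable are forwarded to the PRCMU.
+ */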
+ if ((enable && (requests++ == 0)) || (!enable && (--requests == 0)))
+ return prcmu_request_clock(PRCMU_SYSCLK, enable);
+ return 0;
+}
+
+static int sysclk_enable(struct clk *clk)
+{
+ static bool swat_enable;
+ int r;
+
+ if (!swat_enable) {
+ r = ab8500_sysctrl_set(AB8500_SWATCTRL,
+ AB8500_SWATCTRL_SWATENABLE);
+ if (r)
+ return r;
+
+ swat_enable = true;
+ }
+
+ r = request_sysclk(true);
+ if (r)
+ return r;
+
+ if (clk->cg_sel) {
+ r = ab8500_sysctrl_set(AB8500_SYSULPCLKCTRL1, (u8)clk->cg_sel);
+ if (r)
+ (void)request_sysclk(false);
+ }
+ return r;
+}
+
+static void sysclk_disable(struct clk *clk)
+{
+ int r;
+
+ if (clk->cg_sel) {
+ r = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ (u8)clk->cg_sel);
+ if (r)
+ goto disable_failed;
+ }
+ r = request_sysclk(false);
+ if (r)
+ goto disable_failed;
+ return;
+
+disable_failed:
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static struct clkops sysclk_ops = {
+ .enable = sysclk_enable,
+ .disable = sysclk_disable,
+};
+
+/* AB8500 UlpClk operations */
+
+static int ab_ulpclk_enable(struct clk *clk)
+{
+ int err;
+
+ if (clk->regulator == NULL) {
+ struct regulator *reg;
+
+ reg = regulator_get(NULL, "v-intcore");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ clk->regulator = reg;
+ }
+ err = regulator_set_optimum_mode(clk->regulator, 1500);
+ if (unlikely(err < 0))
+ goto regulator_enable_error;
+ err = regulator_enable(clk->regulator);
+ if (unlikely(err))
+ goto regulator_enable_error;
+ err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCONF,
+ AB8500_SYSULPCLKCONF_ULPCLKCONF_MASK);
+ if (unlikely(err))
+ goto enable_error;
+ err = ab8500_sysctrl_set(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_ULPCLKREQ);
+ if (unlikely(err))
+ goto enable_error;
+ /* Unknown/undocumented PLL locking time => wait 1 ms. */
+ mdelay(1);
+ return 0;
+
+enable_error:
+ (void)regulator_disable(clk->regulator);
+regulator_enable_error:
+ return err;
+}
+
+static void ab_ulpclk_disable(struct clk *clk)
+{
+ int err;
+
+ err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_ULPCLKREQ);
+ if (unlikely(regulator_disable(clk->regulator) || err))
+ goto out_err;
+
+ regulator_set_optimum_mode(clk->regulator, 0);
+
+ return;
+
+out_err:
+ pr_err("clock: %s failed to disable %s.\n", __func__, clk->name);
+}
+
+static struct clkops ab_ulpclk_ops = {
+ .enable = ab_ulpclk_enable,
+ .disable = ab_ulpclk_disable,
+};
+
+/* AB8500 intclk operations */
+
+enum ab_intclk_parent {
+ AB_INTCLK_PARENT_SYSCLK,
+ AB_INTCLK_PARENT_ULPCLK,
+ AB_INTCLK_PARENTS_END,
+ NUM_AB_INTCLK_PARENTS
+};
+
+static int ab_intclk_enable(struct clk *clk)
+{
+ if (clk->parent == clk->parents[AB_INTCLK_PARENT_ULPCLK]) {
+ return ab8500_sysctrl_write(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK,
+ (1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT));
+ }
+ return 0;
+}
+
+static void ab_intclk_disable(struct clk *clk)
+{
+ if (clk->parent == clk->parents[AB_INTCLK_PARENT_SYSCLK])
+ return;
+
+ if (ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK)) {
+ pr_err("clock: %s failed to disable %s.\n", __func__,
+ clk->name);
+ }
+}
+
+static int ab_intclk_set_parent(struct clk *clk, struct clk *parent)
+{
+ int err;
+
+ if (!clk->enabled)
+ return 0;
+
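+ /*
+ * Take a reference on the new parent, reprogram the AB8500 mux, and
+ * only then drop the reference on the old parent.
+ */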
+ err = __clk_enable(parent, clk->mutex);
+
+ if (unlikely(err))
+ goto parent_enable_error;
+
+ if (parent == clk->parents[AB_INTCLK_PARENT_ULPCLK]) {
+ err = ab8500_sysctrl_write(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK,
+ (1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT));
+ } else {
+ err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK);
+ }
+ if (unlikely(err))
+ goto config_error;
+
+ __clk_disable(clk->parent, clk->mutex);
+
+ return 0;
+
+config_error:
+ __clk_disable(parent, clk->mutex);
+parent_enable_error:
+ return err;
+}
+
+static struct clkops ab_intclk_ops = {
+ .enable = ab_intclk_enable,
+ .disable = ab_intclk_disable,
+ .set_parent = ab_intclk_set_parent,
+};
+
+/* AB8500 audio clock operations */
+
+static int audioclk_enable(struct clk *clk)
+{
+ return ab8500_sysctrl_set(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_AUDIOCLKENA);
+}
+
+static void audioclk_disable(struct clk *clk)
+{
+ if (ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_AUDIOCLKENA)) {
+ pr_err("clock: %s failed to disable %s.\n", __func__,
+ clk->name);
+ }
+}
+
+static struct clkops audioclk_ops = {
+ .enable = audioclk_enable,
+ .disable = audioclk_disable,
+};
+
+/* Primary camera clock operations */
+static int clkout0_enable(struct clk *clk)
+{
+ int r;
+
+ if (clk->regulator == NULL) {
+ struct regulator *reg;
+
+ reg = regulator_get(NULL, "v-ape");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ clk->regulator = reg;
+ }
+ r = regulator_enable(clk->regulator);
+ if (r)
+ goto regulator_failed;
+ r = prcmu_config_clkout(0, PRCMU_CLKSRC_CLK38M, 4);
+ if (r)
+ goto config_failed;
+ r = nmk_config_pin(GPIO227_CLKOUT1, false);
+ if (r)
+ goto gpio_failed;
+ return r;
+
+gpio_failed:
+ (void)prcmu_config_clkout(0, PRCMU_CLKSRC_CLK38M, 0);
+config_failed:
+ (void)regulator_disable(clk->regulator);
+regulator_failed:
+ return r;
+}
+
+static void clkout0_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pin((GPIO227_GPIO | PIN_OUTPUT_LOW), false);
+ if (r)
+ goto disable_failed;
+ (void)prcmu_config_clkout(0, PRCMU_CLKSRC_CLK38M, 0);
+ (void)regulator_disable(clk->regulator);
+ return;
+
+disable_failed:
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+/* Touch screen/secondary camera clock operations. */
+static int clkout1_enable(struct clk *clk)
+{
+ int r;
+
+ if (clk->regulator == NULL) {
+ struct regulator *reg;
+
+ reg = regulator_get(NULL, "v-ape");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ clk->regulator = reg;
+ }
+ r = regulator_enable(clk->regulator);
+ if (r)
+ goto regulator_failed;
+ r = prcmu_config_clkout(1, PRCMU_CLKSRC_SYSCLK, 4);
+ if (r)
+ goto config_failed;
+ r = nmk_config_pin(GPIO228_CLKOUT2, false);
+ if (r)
+ goto gpio_failed;
+ return r;
+
+gpio_failed:
+ (void)prcmu_config_clkout(1, PRCMU_CLKSRC_SYSCLK, 0);
+config_failed:
+ (void)regulator_disable(clk->regulator);
+regulator_failed:
+ return r;
+}
+
+static void clkout1_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pin((GPIO228_GPIO | PIN_OUTPUT_LOW), false);
+ if (r)
+ goto disable_failed;
+ (void)prcmu_config_clkout(1, PRCMU_CLKSRC_SYSCLK, 0);
+ (void)regulator_disable(clk->regulator);
+ return;
+
+disable_failed:
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static struct clkops clkout0_ops = {
+ .enable = clkout0_enable,
+ .disable = clkout0_disable,
+};
+
+static struct clkops clkout1_ops = {
+ .enable = clkout1_enable,
+ .disable = clkout1_disable,
+};
+
+#define DEF_PER1_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST1_BASE, _cg_bit, &per1clk)
+#define DEF_PER2_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST2_BASE, _cg_bit, &per2clk)
+#define DEF_PER3_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST3_BASE, _cg_bit, &per3clk)
+#define DEF_PER5_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST5_BASE, _cg_bit, &per5clk)
+#define DEF_PER6_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST6_BASE, _cg_bit, &per6clk)
+
+#define DEF_PER1_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST1_BASE, _cg_bit, _parent, &per1clk)
+#define DEF_PER2_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST2_BASE, _cg_bit, _parent, &per2clk)
+#define DEF_PER3_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST3_BASE, _cg_bit, _parent, &per3clk)
+#define DEF_PER5_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST5_BASE, _cg_bit, _parent, &per5clk)
+#define DEF_PER6_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST6_BASE, _cg_bit, _parent, &per6clk)
+
+/* Clock sources. */
+
+static struct clk soc0_pll = {
+ .name = "soc0_pll",
+ .ops = &pll_ops,
+ .cg_sel = PRCMU_PLLSOC0,
+};
+
+static struct clk soc1_pll = {
+ .name = "soc1_pll",
+ .ops = &prcmu_clk_ops,
+ .cg_sel = PRCMU_PLLSOC1,
+ .mutex = &soc1_pll_mutex,
+};
+
+static struct clk ddr_pll = {
+ .name = "ddr_pll",
+ .ops = &pll_ops,
+ .cg_sel = PRCMU_PLLDDR,
+};
+
+static struct clk ulp38m4 = {
+ .name = "ulp38m4",
+ .rate = 38400000,
+};
+
+static struct clk sysclk = {
+ .name = "sysclk",
+ .ops = &sysclk_ops,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk sysclk2 = {
+ .name = "sysclk2",
+ .ops = &sysclk_ops,
+ .cg_sel = AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk sysclk3 = {
+ .name = "sysclk3",
+ .ops = &sysclk_ops,
+ .cg_sel = AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk sysclk4 = {
+ .name = "sysclk4",
+ .ops = &sysclk_ops,
+ .cg_sel = AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk rtc32k = {
+ .name = "rtc32k",
+ .rate = 32768,
+};
+
+static struct clk clkout0 = {
+ .name = "clkout0",
+ .ops = &clkout0_ops,
+ .parent = &ulp38m4,
+ .rate = 9600000,
+ .mutex = &clkout0_mutex,
+};
+
+static struct clk clkout1 = {
+ .name = "clkout1",
+ .ops = &clkout1_ops,
+ .parent = &sysclk,
+ .rate = 9600000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk ab_ulpclk = {
+ .name = "ab_ulpclk",
+ .ops = &ab_ulpclk_ops,
+ .rate = 38400000,
+ .mutex = &ab_ulpclk_mutex,
+};
+
+static struct clk *ab_intclk_parents[NUM_AB_INTCLK_PARENTS] = {
+ [AB_INTCLK_PARENT_SYSCLK] = &sysclk,
+ [AB_INTCLK_PARENT_ULPCLK] = &ab_ulpclk,
+ [AB_INTCLK_PARENTS_END] = NULL,
+};
+
+static struct clk ab_intclk = {
+ .name = "ab_intclk",
+ .ops = &ab_intclk_ops,
+ .mutex = &ab_intclk_mutex,
+ .parent = &sysclk,
+ .parents = ab_intclk_parents,
+};
+
+static struct clk audioclk = {
+ .name = "audioclk",
+ .ops = &audioclk_ops,
+ .mutex = &ab_intclk_mutex,
+ .parent = &ab_intclk,
+};
+
+static DEF_PRCMU_CLK(sgaclk, PRCMU_SGACLK, 320000000);
+static DEF_PRCMU_CLK(uartclk, PRCMU_UARTCLK, 38400000);
+static DEF_PRCMU_CLK(msp02clk, PRCMU_MSP02CLK, 19200000);
+static DEF_PRCMU_CLK(msp1clk, PRCMU_MSP1CLK, 19200000);
+static DEF_PRCMU_CLK(i2cclk, PRCMU_I2CCLK, 24000000);
+static DEF_PRCMU_CLK(slimclk, PRCMU_SLIMCLK, 19200000);
+static DEF_PRCMU_CLK(per1clk, PRCMU_PER1CLK, 133330000);
+static DEF_PRCMU_CLK(per2clk, PRCMU_PER2CLK, 133330000);
+static DEF_PRCMU_CLK(per3clk, PRCMU_PER3CLK, 133330000);
+static DEF_PRCMU_CLK(per5clk, PRCMU_PER5CLK, 133330000);
+static DEF_PRCMU_CLK(per6clk, PRCMU_PER6CLK, 133330000);
+static DEF_PRCMU_CLK(per7clk, PRCMU_PER7CLK, 100000000);
+static DEF_PRCMU_SCALABLE_CLK(lcdclk, PRCMU_LCDCLK);
+static DEF_PRCMU_OPP100_CLK(bmlclk, PRCMU_BMLCLK, 200000000);
+static DEF_PRCMU_SCALABLE_CLK(hsitxclk, PRCMU_HSITXCLK);
+static DEF_PRCMU_SCALABLE_CLK(hsirxclk, PRCMU_HSIRXCLK);
+static DEF_PRCMU_SCALABLE_CLK(hdmiclk, PRCMU_HDMICLK);
+static DEF_PRCMU_CLK(apeatclk, PRCMU_APEATCLK, 160000000);
+static DEF_PRCMU_CLK(apetraceclk, PRCMU_APETRACECLK, 160000000);
+static DEF_PRCMU_CLK(mcdeclk, PRCMU_MCDECLK, 160000000);
+static DEF_PRCMU_OPP100_CLK(ipi2cclk, PRCMU_IPI2CCLK, 24000000);
+static DEF_PRCMU_CLK(dsialtclk, PRCMU_DSIALTCLK, 384000000);
+static DEF_PRCMU_CLK(dmaclk, PRCMU_DMACLK, 200000000);
+static DEF_PRCMU_CLK(b2r2clk, PRCMU_B2R2CLK, 200000000);
+static DEF_PRCMU_SCALABLE_CLK(tvclk, PRCMU_TVCLK);
+/* TODO: For SSPCLK, the spec says 24MHz, while the old driver says 48MHz. */
+static DEF_PRCMU_CLK(sspclk, PRCMU_SSPCLK, 24000000);
+static DEF_PRCMU_CLK(rngclk, PRCMU_RNGCLK, 19200000);
+static DEF_PRCMU_CLK(uiccclk, PRCMU_UICCCLK, 48000000);
+static DEF_PRCMU_CLK(timclk, PRCMU_TIMCLK, 2400000);
+static DEF_PRCMU_CLK(sdmmcclk, PRCMU_SDMMCCLK, 50000000);
+
+/* PRCC PClocks */
+
+static DEF_PER1_PCLK(0, p1_pclk0);
+static DEF_PER1_PCLK(1, p1_pclk1);
+static DEF_PER1_PCLK(2, p1_pclk2);
+static DEF_PER1_PCLK(3, p1_pclk3);
+static DEF_PER1_PCLK(4, p1_pclk4);
+static DEF_PER1_PCLK(5, p1_pclk5);
+static DEF_PER1_PCLK(6, p1_pclk6);
+static DEF_PER1_PCLK(7, p1_pclk7);
+static DEF_PER1_PCLK(8, p1_pclk8);
+static DEF_PER1_PCLK(9, p1_pclk9);
+static DEF_PER1_PCLK(10, p1_pclk10);
+static DEF_PER1_PCLK(11, p1_pclk11);
+
+static DEF_PER2_PCLK(0, p2_pclk0);
+static DEF_PER2_PCLK(1, p2_pclk1);
+static DEF_PER2_PCLK(2, p2_pclk2);
+static DEF_PER2_PCLK(3, p2_pclk3);
+static DEF_PER2_PCLK(4, p2_pclk4);
+static DEF_PER2_PCLK(5, p2_pclk5);
+static DEF_PER2_PCLK(6, p2_pclk6);
+static DEF_PER2_PCLK(7, p2_pclk7);
+static DEF_PER2_PCLK(8, p2_pclk8);
+static DEF_PER2_PCLK(9, p2_pclk9);
+static DEF_PER2_PCLK(10, p2_pclk10);
+static DEF_PER2_PCLK(11, p2_pclk11);
+
+static DEF_PER3_PCLK(0, p3_pclk0);
+static DEF_PER3_PCLK(1, p3_pclk1);
+static DEF_PER3_PCLK(2, p3_pclk2);
+static DEF_PER3_PCLK(3, p3_pclk3);
+static DEF_PER3_PCLK(4, p3_pclk4);
+static DEF_PER3_PCLK(5, p3_pclk5);
+static DEF_PER3_PCLK(6, p3_pclk6);
+static DEF_PER3_PCLK(7, p3_pclk7);
+static DEF_PER3_PCLK(8, p3_pclk8);
+
+static DEF_PER5_PCLK(0, p5_pclk0);
+static DEF_PER5_PCLK(1, p5_pclk1);
+
+static DEF_PER6_PCLK(0, p6_pclk0);
+static DEF_PER6_PCLK(1, p6_pclk1);
+static DEF_PER6_PCLK(2, p6_pclk2);
+static DEF_PER6_PCLK(3, p6_pclk3);
+static DEF_PER6_PCLK(4, p6_pclk4);
+static DEF_PER6_PCLK(5, p6_pclk5);
+static DEF_PER6_PCLK(6, p6_pclk6);
+static DEF_PER6_PCLK(7, p6_pclk7);
+
+/* UART0 */
+static DEF_PER1_KCLK(0, p1_uart0_kclk, &uartclk);
+static DEF_PER_CLK(p1_uart0_clk, &p1_pclk0, &p1_uart0_kclk);
+
+/* UART1 */
+static DEF_PER1_KCLK(1, p1_uart1_kclk, &uartclk);
+static DEF_PER_CLK(p1_uart1_clk, &p1_pclk1, &p1_uart1_kclk);
+
+/* I2C1 */
+static DEF_PER1_KCLK(2, p1_i2c1_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c1_clk, &p1_pclk2, &p1_i2c1_kclk);
+
+/* MSP0 */
+static DEF_PER1_KCLK(3, p1_msp0_kclk, &msp02clk);
+static DEF_PER_CLK(p1_msp0_clk, &p1_pclk3, &p1_msp0_kclk);
+
+/* MSP1 */
+static DEF_PER1_KCLK(4, p1_msp1_kclk, &msp1clk);
+static DEF_PER_CLK(p1_msp1_clk, &p1_pclk4, &p1_msp1_kclk);
+
+/* SDI0 */
+static DEF_PER1_KCLK(5, p1_sdi0_kclk, &sdmmcclk);
+static DEF_PER_CLK(p1_sdi0_clk, &p1_pclk5, &p1_sdi0_kclk);
+
+/* I2C2 */
+static DEF_PER1_KCLK(6, p1_i2c2_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c2_clk, &p1_pclk6, &p1_i2c2_kclk);
+
+/* SLIMBUS0 */
+static DEF_PER1_KCLK(3, p1_slimbus0_kclk, &slimclk);
+static DEF_PER_CLK(p1_slimbus0_clk, &p1_pclk8, &p1_slimbus0_kclk);
+
+/* I2C4 */
+static DEF_PER1_KCLK(9, p1_i2c4_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c4_clk, &p1_pclk10, &p1_i2c4_kclk);
+
+/* MSP3 */
+static DEF_PER1_KCLK(10, p1_msp3_kclk, &msp1clk);
+static DEF_PER_CLK(p1_msp3_clk, &p1_pclk11, &p1_msp3_kclk);
+
+/* I2C3 */
+static DEF_PER2_KCLK(0, p2_i2c3_kclk, &i2cclk);
+static DEF_PER_CLK(p2_i2c3_clk, &p2_pclk0, &p2_i2c3_kclk);
+
+/* SDI4 */
+static DEF_PER2_KCLK(2, p2_sdi4_kclk, &sdmmcclk);
+static DEF_PER_CLK(p2_sdi4_clk, &p2_pclk4, &p2_sdi4_kclk);
+
+/* MSP2 */
+static DEF_PER2_KCLK(3, p2_msp2_kclk, &msp02clk);
+static DEF_PER_CLK(p2_msp2_clk, &p2_pclk5, &p2_msp2_kclk);
+
+/* SDI1 */
+static DEF_PER2_KCLK(4, p2_sdi1_kclk, &sdmmcclk);
+static DEF_PER_CLK(p2_sdi1_clk, &p2_pclk6, &p2_sdi1_kclk);
+
+/* SDI3 */
+static DEF_PER2_KCLK(5, p2_sdi3_kclk, &sdmmcclk);
+static DEF_PER_CLK(p2_sdi3_clk, &p2_pclk7, &p2_sdi3_kclk);
+
+/* HSIR */
+static struct clk p2_ssirx_kclk = {
+ .name = "p2_ssirx_kclk",
+ .ops = &prcc_kclk_rec_ops,
+ .io_base = U8500_CLKRST2_BASE,
+ .cg_sel = BIT(6),
+ .parent = &hsirxclk,
+ .clock = &per2clk,
+};
+
+/* HSIT */
+static struct clk p2_ssitx_kclk = {
+ .name = "p2_ssitx_kclk",
+ .ops = &prcc_kclk_rec_ops,
+ .io_base = U8500_CLKRST2_BASE,
+ .cg_sel = BIT(7),
+ .parent = &hsitxclk,
+ .clock = &per2clk,
+};
+
+/* SSP0 */
+static DEF_PER3_KCLK(1, p3_ssp0_kclk, &sspclk);
+static DEF_PER_CLK(p3_ssp0_clk, &p3_pclk1, &p3_ssp0_kclk);
+
+/* SSP1 */
+static DEF_PER3_KCLK(2, p3_ssp1_kclk, &sspclk);
+static DEF_PER_CLK(p3_ssp1_clk, &p3_pclk2, &p3_ssp1_kclk);
+
+/* I2C0 */
+static DEF_PER3_KCLK(3, p3_i2c0_kclk, &i2cclk);
+static DEF_PER_CLK(p3_i2c0_clk, &p3_pclk3, &p3_i2c0_kclk);
+
+/* SDI2 */
+static DEF_PER3_KCLK(4, p3_sdi2_kclk, &sdmmcclk);
+static DEF_PER_CLK(p3_sdi2_clk, &p3_pclk4, &p3_sdi2_kclk);
+
+/* SKE */
+static DEF_PER3_KCLK(5, p3_ske_kclk, &rtc32k);
+static DEF_PER_CLK(p3_ske_clk, &p3_pclk5, &p3_ske_kclk);
+
+/* UART2 */
+static DEF_PER3_KCLK(6, p3_uart2_kclk, &uartclk);
+static DEF_PER_CLK(p3_uart2_clk, &p3_pclk6, &p3_uart2_kclk);
+
+/* SDI5 */
+static DEF_PER3_KCLK(7, p3_sdi5_kclk, &sdmmcclk);
+static DEF_PER_CLK(p3_sdi5_clk, &p3_pclk7, &p3_sdi5_kclk);
+
+/* RNG */
+static DEF_PER6_KCLK(0, p6_rng_kclk, &rngclk);
+static DEF_PER_CLK(p6_rng_clk, &p6_pclk0, &p6_rng_kclk);
+
+/* MTU:S */
+
+/* MTU0 */
+static DEF_PER_CLK(p6_mtu0_clk, &p6_pclk6, &timclk);
+
+/* MTU1 */
+static DEF_PER_CLK(p6_mtu1_clk, &p6_pclk7, &timclk);
+
+/*
+ * TODO: Ensure names match with devices and then remove unnecessary entries
+ * when all drivers use the clk API.
+ */
+
+static struct clk_lookup u8500_clocks[] = {
+ CLK_LOOKUP(soc0_pll, NULL, "soc0_pll"),
+ CLK_LOOKUP(soc1_pll, NULL, "soc1_pll"),
+ CLK_LOOKUP(ddr_pll, NULL, "ddr_pll"),
+ CLK_LOOKUP(ulp38m4, NULL, "ulp38m4"),
+ CLK_LOOKUP(sysclk, NULL, "sysclk"),
+ CLK_LOOKUP(rtc32k, NULL, "clk32k"),
+ CLK_LOOKUP(sysclk, "ab8500-usb.0", "sysclk"),
+ CLK_LOOKUP(sysclk, "ab8500-codec.0", "sysclk"),
+ CLK_LOOKUP(ab_ulpclk, "ab8500-codec.0", "ulpclk"),
+ CLK_LOOKUP(ab_intclk, "ab8500-codec.0", "intclk"),
+ CLK_LOOKUP(audioclk, "ab8500-codec.0", "audioclk"),
+ CLK_LOOKUP(ab_intclk, "ab8500-pwm.1", NULL),
+ CLK_LOOKUP(ab_intclk, "ab8500-pwm.2", NULL),
+ CLK_LOOKUP(ab_intclk, "ab8500-pwm.3", NULL),
+
+ CLK_LOOKUP(clkout0, "pri-cam", NULL),
+ CLK_LOOKUP(clkout1, "3-005c", NULL),
+ CLK_LOOKUP(clkout1, "3-005d", NULL),
+ CLK_LOOKUP(clkout1, "sec-cam", NULL),
+
+ /* prcmu */
+ CLK_LOOKUP(sgaclk, "mali", NULL),
+ CLK_LOOKUP(uartclk, "UART", NULL),
+ CLK_LOOKUP(msp02clk, "MSP02", NULL),
+ CLK_LOOKUP(i2cclk, "I2C", NULL),
+ CLK_LOOKUP(sdmmcclk, "sdmmc", NULL),
+ CLK_LOOKUP(slimclk, "slim", NULL),
+ CLK_LOOKUP(per1clk, "PERIPH1", NULL),
+ CLK_LOOKUP(per2clk, "PERIPH2", NULL),
+ CLK_LOOKUP(per3clk, "PERIPH3", NULL),
+ CLK_LOOKUP(per5clk, "PERIPH5", NULL),
+ CLK_LOOKUP(per6clk, "PERIPH6", NULL),
+ CLK_LOOKUP(per7clk, "PERIPH7", NULL),
+ CLK_LOOKUP(lcdclk, "lcd", NULL),
+ CLK_LOOKUP(bmlclk, "bml", NULL),
+ CLK_LOOKUP(p2_ssitx_kclk, "ste_hsi.0", "hsit_hsitxclk"),
+ CLK_LOOKUP(p2_ssirx_kclk, "ste_hsi.0", "hsir_hsirxclk"),
+ CLK_LOOKUP(lcdclk, "mcde", "lcd"),
+ CLK_LOOKUP(hdmiclk, "hdmi", NULL),
+ CLK_LOOKUP(hdmiclk, "mcde", "hdmi"),
+ CLK_LOOKUP(apeatclk, "apeat", NULL),
+ CLK_LOOKUP(apetraceclk, "apetrace", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", "mcde"),
+ CLK_LOOKUP(ipi2cclk, "ipi2", NULL),
+ CLK_LOOKUP(dmaclk, "dma40.0", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2_bus", NULL),
+ CLK_LOOKUP(b2r2clk, "U8500-B2R2.0", NULL),
+ CLK_LOOKUP(tvclk, "tv", NULL),
+ CLK_LOOKUP(tvclk, "mcde", "tv"),
+ CLK_LOOKUP(msp1clk, "MSP1", NULL),
+ CLK_LOOKUP(dsialtclk, "dsialt", NULL),
+ CLK_LOOKUP(sspclk, "SSP", NULL),
+ CLK_LOOKUP(rngclk, "rngclk", NULL),
+ CLK_LOOKUP(uiccclk, "uicc", NULL),
+
+ /* PERIPH 1 */
+ CLK_LOOKUP(p1_msp3_clk, "msp3", NULL),
+ CLK_LOOKUP(p1_msp3_clk, "MSP_I2S.3", NULL),
+ CLK_LOOKUP(p1_msp3_kclk, "ab8500-codec.0", "msp3-kernel"),
+ CLK_LOOKUP(p1_pclk11, "ab8500-codec.0", "msp3-bus"),
+ CLK_LOOKUP(p1_uart0_clk, "uart0", NULL),
+ CLK_LOOKUP(p1_uart1_clk, "uart1", NULL),
+ CLK_LOOKUP(p1_i2c1_clk, "nmk-i2c.1", NULL),
+ CLK_LOOKUP(p1_msp0_clk, "msp0", NULL),
+ CLK_LOOKUP(p1_msp0_clk, "MSP_I2S.0", NULL),
+ CLK_LOOKUP(p1_sdi0_clk, "sdi0", NULL),
+ CLK_LOOKUP(p1_i2c2_clk, "nmk-i2c.2", NULL),
+ CLK_LOOKUP(p1_slimbus0_clk, "slimbus0", NULL),
+ CLK_LOOKUP(p1_pclk9, "gpio.0", NULL),
+ CLK_LOOKUP(p1_pclk9, "gpio.1", NULL),
+ CLK_LOOKUP(p1_pclk9, "gpioblock0", NULL),
+ CLK_LOOKUP(p1_msp1_clk, "msp1", NULL),
+ CLK_LOOKUP(p1_msp1_clk, "MSP_I2S.1", NULL),
+ CLK_LOOKUP(p1_msp1_kclk, "ab8500-codec.0", "msp1-kernel"),
+ CLK_LOOKUP(p1_pclk4, "ab8500-codec.0", "msp1-bus"),
+ CLK_LOOKUP(p1_pclk7, "spi3", NULL),
+ CLK_LOOKUP(p1_i2c4_clk, "nmk-i2c.4", NULL),
+
+ /* PERIPH 2 */
+ CLK_LOOKUP(p2_i2c3_clk, "nmk-i2c.3", NULL),
+ CLK_LOOKUP(p2_pclk1, "spi2", NULL),
+ CLK_LOOKUP(p2_pclk2, "spi1", NULL),
+ CLK_LOOKUP(p2_pclk3, "pwl", NULL),
+ CLK_LOOKUP(p2_sdi4_clk, "sdi4", NULL),
+ CLK_LOOKUP(p2_msp2_clk, "msp2", NULL),
+ CLK_LOOKUP(p2_msp2_clk, "MSP_I2S.2", NULL),
+ CLK_LOOKUP(p2_sdi1_clk, "sdi1", NULL),
+ CLK_LOOKUP(p2_sdi3_clk, "sdi3", NULL),
+ CLK_LOOKUP(p2_pclk8, "spi0", NULL),
+ CLK_LOOKUP(p2_pclk9, "ste_hsi.0", "hsir_hclk"),
+ CLK_LOOKUP(p2_pclk10, "ste_hsi.0", "hsit_hclk"),
+ CLK_LOOKUP(p2_pclk11, "gpio.6", NULL),
+ CLK_LOOKUP(p2_pclk11, "gpio.7", NULL),
+ CLK_LOOKUP(p2_pclk11, "gpioblock1", NULL),
+
+ /* PERIPH 3 */
+ CLK_LOOKUP(p3_pclk0, "fsmc", NULL),
+ CLK_LOOKUP(p3_i2c0_clk, "nmk-i2c.0", NULL),
+ CLK_LOOKUP(p3_sdi2_clk, "sdi2", NULL),
+ CLK_LOOKUP(p3_ske_clk, "ske", NULL),
+ CLK_LOOKUP(p3_ske_clk, "nmk-ske-keypad", NULL),
+ CLK_LOOKUP(p3_uart2_clk, "uart2", NULL),
+ CLK_LOOKUP(p3_sdi5_clk, "sdi5", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.2", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.3", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.4", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.5", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpioblock2", NULL),
+ CLK_LOOKUP(p3_ssp0_clk, "ssp0", NULL),
+ CLK_LOOKUP(p3_ssp1_clk, "ssp1", NULL),
+
+ /* PERIPH 5 */
+ CLK_LOOKUP(p5_pclk1, "gpio.8", NULL),
+ CLK_LOOKUP(p5_pclk1, "gpioblock3", NULL),
+ CLK_LOOKUP(p5_pclk0, "musb-ux500.0", "usb"),
+
+ /* PERIPH 6 */
+ CLK_LOOKUP(p6_pclk1, "cryp0", NULL),
+ CLK_LOOKUP(p6_pclk2, "hash0", NULL),
+ CLK_LOOKUP(p6_pclk3, "pka", NULL),
+ CLK_LOOKUP(p6_pclk5, "cfgreg", NULL),
+ CLK_LOOKUP(p6_mtu0_clk, "mtu0", NULL),
+ CLK_LOOKUP(p6_mtu1_clk, "mtu1", NULL),
+ CLK_LOOKUP(p6_pclk4, "hash1", NULL),
+ CLK_LOOKUP(p6_pclk1, "cryp1", NULL),
+ CLK_LOOKUP(p6_rng_clk, "rng", NULL),
+};
+
+static struct clk_lookup u8500_v2_sysclks[] = {
+ CLK_LOOKUP(sysclk2, NULL, "sysclk2"),
+ CLK_LOOKUP(sysclk3, NULL, "sysclk3"),
+ CLK_LOOKUP(sysclk4, NULL, "sysclk4"),
+};
+
+static void sysclk_init_disable(struct work_struct *not_used)
+{
+ int i;
+
+ mutex_lock(&sysclk_mutex);
+
+ /* Enable SWAT */
+ if (ab8500_sysctrl_set(AB8500_SWATCTRL, AB8500_SWATCTRL_SWATENABLE))
+ goto err_swat;
+
+ for (i = 0; i < ARRAY_SIZE(u8500_v2_sysclks); i++) {
+ struct clk *clk = u8500_v2_sysclks[i].clk;
+
+ /* Disable sysclks */
+ if (!clk->enabled && clk->cg_sel) {
+ if (ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ (u8)clk->cg_sel))
+ goto err_sysclk;
+ }
+ }
+ goto unlock_and_exit;
+
+err_sysclk:
+ pr_err("clock: Disable %s failed", u8500_v2_sysclks[i].clk->name);
+ ab8500_sysctrl_clear(AB8500_SWATCTRL, AB8500_SWATCTRL_SWATENABLE);
+ goto unlock_and_exit;
+
+err_swat:
+ pr_err("clock: Enable SWAT failed");
+
+unlock_and_exit:
+ mutex_unlock(&sysclk_mutex);
+}
+
+static struct clk *db8500_dbg_clks[] __initdata = {
+ /* Clock sources */
+ &soc0_pll,
+ &soc1_pll,
+ &ddr_pll,
+ &ulp38m4,
+ &sysclk,
+ &rtc32k,
+ /* PRCMU clocks */
+ &sgaclk,
+ &uartclk,
+ &msp02clk,
+ &msp1clk,
+ &i2cclk,
+ &sdmmcclk,
+ &slimclk,
+ &per1clk,
+ &per2clk,
+ &per3clk,
+ &per5clk,
+ &per6clk,
+ &per7clk,
+ &lcdclk,
+ &bmlclk,
+ &hsitxclk,
+ &hsirxclk,
+ &hdmiclk,
+ &apeatclk,
+ &apetraceclk,
+ &mcdeclk,
+ &ipi2cclk,
+ &dsialtclk,
+ &dmaclk,
+ &b2r2clk,
+ &tvclk,
+ &sspclk,
+ &rngclk,
+ &uiccclk,
+ &sysclk2,
+ &clkout0,
+ &clkout1,
+ &p1_pclk0,
+ &p1_pclk1,
+ &p1_pclk2,
+ &p1_pclk3,
+ &p1_pclk4,
+ &p1_pclk5,
+ &p1_pclk6,
+ &p1_pclk7,
+ &p1_pclk8,
+ &p1_pclk9,
+ &p1_pclk10,
+ &p1_pclk11,
+ &p2_pclk0,
+ &p2_pclk1,
+ &p2_pclk2,
+ &p2_pclk3,
+ &p2_pclk4,
+ &p2_pclk5,
+ &p2_pclk6,
+ &p2_pclk7,
+ &p2_pclk8,
+ &p2_pclk9,
+ &p2_pclk10,
+ &p2_pclk11,
+ &p3_pclk0,
+ &p3_pclk1,
+ &p3_pclk2,
+ &p3_pclk3,
+ &p3_pclk4,
+ &p3_pclk5,
+ &p3_pclk6,
+ &p3_pclk7,
+ &p3_pclk8,
+ &p5_pclk0,
+ &p5_pclk1,
+ &p6_pclk0,
+ &p6_pclk1,
+ &p6_pclk2,
+ &p6_pclk3,
+ &p6_pclk4,
+ &p6_pclk5,
+ &p6_pclk6,
+ &p6_pclk7,
+};
+
+/* List of clocks which might be enabled from the bootloader */
+static struct clk *loader_enabled_clk[] __initdata = {
+ &p1_uart0_clk, /* uart0 */
+ &p1_uart1_clk, /* uart1 */
+ &p3_uart2_clk, /* uart2 */
+ &p1_pclk9, /* gpioblock0 */
+ &p2_pclk11, /* gpioblock1 */
+ &p3_pclk8, /* gpioblock2 */
+ &p5_pclk1, /* gpioblock3 */
+ &p6_mtu0_clk, /* mtu0 */
+ &p6_mtu1_clk, /* mtu1 */
+ &p3_ssp0_clk, /* ssp0 */
+ &p3_ssp1_clk, /* ssp1 */
+ &p2_pclk8, /* spi0 */
+ &p2_pclk2, /* spi1 */
+ &p2_pclk1, /* spi2 */
+ &p1_pclk7, /* spi3 */
+ &p1_msp0_clk, /* msp0 */
+ &p2_msp2_clk, /* msp2 */
+ &p3_i2c0_clk, /* nmk-i2c.0 */
+ &p1_i2c1_clk, /* nmk-i2c.1 */
+ &p1_i2c2_clk, /* nmk-i2c.2 */
+ &p2_i2c3_clk, /* nmk-i2c.3 */
+ &p1_i2c4_clk, /* nmk-i2c.4 */
+ &bmlclk, /* bml */
+ &dsialtclk, /* dsialt */
+ &hsirxclk, /* hsirx */
+ &hsitxclk, /* hsitx */
+ &ipi2cclk, /* ipi2 */
+ &lcdclk, /* mcde */
+ &per7clk, /* PERIPH7 */
+ &b2r2clk, /* b2r2_bus */
+};
+
+static int __init init_clock_states(void)
+{
+ unsigned int i = 0;
+
+ /*
+ * Disable peripheral clocks enabled by the bootloader or by default,
+ * but which have no drivers.
+ */
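+ /*
+ * Each clock is enabled and then immediately disabled so that it ends
+ * up gated without unbalancing its enable count.
+ */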
+ for (i = 0; i < ARRAY_SIZE(loader_enabled_clk); i++)
+ if (!clk_enable(loader_enabled_clk[i]))
+ clk_disable(loader_enabled_clk[i]);
+
+ /*
+ * The following clks are shared with secure world.
+ * Currently this leads to a limitation where we need to
+ * enable them at all times.
+ */
+ clk_enable(&p6_pclk1);
+ clk_enable(&p6_pclk2);
+ clk_enable(&p6_pclk3);
+ clk_enable(&p6_rng_clk);
+
+ /*
+ * APEATCLK and APETRACECLK are enabled at boot and needed
+ * in order to debug with Lauterbach
+ */
+ if (!clk_enable(&apeatclk)) {
+ if (!ux500_jtag_enabled())
+ clk_disable(&apeatclk);
+ }
+ if (!clk_enable(&apetraceclk)) {
+ if (!ux500_jtag_enabled())
+ clk_disable(&apetraceclk);
+ }
+
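+ /*
+ * sysclk_init_disable() runs once, 10 seconds after boot, and gates any
+ * AB8500 sysclk buffers that nobody has requested by then.
+ */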
+ INIT_DELAYED_WORK(&sysclk_disable_work, sysclk_init_disable);
+ schedule_delayed_work(&sysclk_disable_work, 10 * HZ);
+
+ return 0;
+}
+late_initcall(init_clock_states);
+
+int __init db8500_clk_init(void)
+{
+ clkdev_add_table(u8500_v2_sysclks,
+ ARRAY_SIZE(u8500_v2_sysclks));
+ clkdev_add_table(u8500_clocks,
+ ARRAY_SIZE(u8500_clocks));
+
+ return 0;
+}
+
+int __init db8500_clk_debug_init(void)
+{
+ return dbx500_clk_debug_init(db8500_dbg_clks,
+ ARRAY_SIZE(db8500_dbg_clks));
+}
diff --git a/arch/arm/mach-ux500/clock-debug.c b/arch/arm/mach-ux500/clock-debug.c
new file mode 100644
index 00000000000..1ebc69fe061
--- /dev/null
+++ b/arch/arm/mach-ux500/clock-debug.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <mach/hardware.h>
+
+#include "clock.h"
+
+struct clk_debug_info {
+ struct clk *clk;
+ struct dentry *dir;
+ struct dentry *enable;
+ struct dentry *requests;
+ int enabled;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *clk_dir;
+static struct dentry *clk_show;
+static struct dentry *clk_show_enabled_only;
+
+static struct clk_debug_info *cdi;
+static int num_clks;
+
+static int clk_show_print(struct seq_file *s, void *p)
+{
+ int i;
+ int enabled_only = (int)s->private;
+
+ seq_printf(s, "\n%-20s %10s %s\n", "name", "rate",
+ "enabled (kernel + debug)");
+ for (i = 0; i < num_clks; i++) {
+ if (enabled_only && !cdi[i].clk->enabled)
+ continue;
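+ /*
+ * Print kernel enable requests separately from enables made through
+ * this debugfs interface.
+ */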
+ seq_printf(s,
+ "%-20s %10lu %5d + %d\n",
+ cdi[i].clk->name,
+ clk_get_rate(cdi[i].clk),
+ cdi[i].clk->enabled - cdi[i].enabled,
+ cdi[i].enabled);
+ }
+
+ return 0;
+}
+
+static int clk_show_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_show_print, inode->i_private);
+}
+
+static int clk_enable_print(struct seq_file *s, void *p)
+{
+ struct clk_debug_info *cdi = s->private;
+
+ return seq_printf(s, "%d\n", cdi->enabled);
+}
+
+static int clk_enable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_enable_print, inode->i_private);
+}
+
+static ssize_t clk_enable_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct clk_debug_info *cdi;
+ long user_val;
+ int err;
+
+ cdi = ((struct seq_file *)(file->private_data))->private;
+
+ err = kstrtol_from_user(user_buf, count, 0, &user_val);
+
+ if (err)
+ return err;
+
+ if ((user_val > 0) && (!cdi->enabled)) {
+ err = clk_enable(cdi->clk);
+ if (err) {
+ pr_err("clock: clk_enable(%s) failed.\n",
+ cdi->clk->name);
+ return -EFAULT;
+ }
+ cdi->enabled = 1;
+ } else if ((user_val <= 0) && (cdi->enabled)) {
+ clk_disable(cdi->clk);
+ cdi->enabled = 0;
+ }
+ return count;
+}
+
+static int clk_requests_print(struct seq_file *s, void *p)
+{
+ struct clk_debug_info *cdi = s->private;
+
+ return seq_printf(s, "%d\n", cdi->clk->enabled);
+}
+
+static int clk_requests_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_requests_print, inode->i_private);
+}
+
+static const struct file_operations clk_enable_fops = {
+ .open = clk_enable_open,
+ .write = clk_enable_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations clk_requests_fops = {
+ .open = clk_requests_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations clk_show_fops = {
+ .open = clk_show_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int create_clk_dirs(struct clk_debug_info *cdi, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ cdi[i].dir = debugfs_create_dir(cdi[i].clk->name, clk_dir);
+ if (!cdi[i].dir)
+ goto no_dir;
+ }
+
+ for (i = 0; i < size; i++) {
+ cdi[i].enable = debugfs_create_file("enable",
+ (S_IRUGO | S_IWUGO),
+ cdi[i].dir, &cdi[i],
+ &clk_enable_fops);
+ if (!cdi[i].enable)
+ goto no_enable;
+ }
+ for (i = 0; i < size; i++) {
+ cdi[i].requests = debugfs_create_file("requests", S_IRUGO,
+ cdi[i].dir, &cdi[i],
+ &clk_requests_fops);
+ if (!cdi[i].requests)
+ goto no_requests;
+ }
+ return 0;
+
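+	/*
+	 * Unwind: each label removes what its own stage managed to create,
+	 * then falls through to remove everything from the earlier stages.
+	 */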
+no_requests:
+ while (i--)
+ debugfs_remove(cdi[i].requests);
+ i = size;
+no_enable:
+ while (i--)
+ debugfs_remove(cdi[i].enable);
+ i = size;
+no_dir:
+ while (i--)
+ debugfs_remove(cdi[i].dir);
+
+ return -ENOMEM;
+}
+
+int __init dbx500_clk_debug_init(struct clk **clks, int num)
+{
+ int i;
+
+	cdi = kcalloc(num, sizeof(struct clk_debug_info), GFP_KERNEL);
+ if (!cdi)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ cdi[i].clk = clks[i];
+
+ num_clks = num;
+
+ clk_dir = debugfs_create_dir("clk", NULL);
+ if (!clk_dir)
+ goto no_dir;
+
+ clk_show = debugfs_create_file("show", S_IRUGO, clk_dir, (void *)0,
+ &clk_show_fops);
+ if (!clk_show)
+ goto no_show;
+
+ clk_show_enabled_only = debugfs_create_file("show-enabled-only",
+ S_IRUGO, clk_dir, (void *)1,
+ &clk_show_fops);
+ if (!clk_show_enabled_only)
+ goto no_enabled_only;
+
+ if (create_clk_dirs(cdi, num))
+ goto no_clks;
+
+ return 0;
+
+no_clks:
+ debugfs_remove(clk_show_enabled_only);
+no_enabled_only:
+ debugfs_remove(clk_show);
+no_show:
+ debugfs_remove(clk_dir);
+no_dir:
+ kfree(cdi);
+ return -ENOMEM;
+}
+
+static int __init clk_debug_init(void)
+{
+ if (cpu_is_u8500())
+ db8500_clk_debug_init();
+ else if (cpu_is_u5500())
+ db5500_clk_debug_init();
+
+ return 0;
+}
+module_init(clk_debug_init);
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index e832664d1bd..108014ea55b 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -7,815 +7,476 @@
* published by the Free Software Foundation.
*/
#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/cpufreq.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/ab8500/sysctrl.h>
+#include <linux/mfd/dbx500-prcmu.h>
-#include <plat/mtu.h>
-#include <mach/hardware.h>
#include "clock.h"
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#include <linux/uaccess.h> /* for copy_from_user */
-static LIST_HEAD(clk_list);
-#endif
+#define PRCC_PCKEN 0x0
+#define PRCC_PCKDIS 0x4
+#define PRCC_KCKEN 0x8
+#define PRCC_KCKDIS 0xC
+#define PRCC_PCKSR 0x10
+#define PRCC_KCKSR 0x14
-#define PRCC_PCKEN 0x00
-#define PRCC_PCKDIS 0x04
-#define PRCC_KCKEN 0x08
-#define PRCC_KCKDIS 0x0C
-
-#define PRCM_YYCLKEN0_MGT_SET 0x510
-#define PRCM_YYCLKEN1_MGT_SET 0x514
-#define PRCM_YYCLKEN0_MGT_CLR 0x518
-#define PRCM_YYCLKEN1_MGT_CLR 0x51C
-#define PRCM_YYCLKEN0_MGT_VAL 0x520
-#define PRCM_YYCLKEN1_MGT_VAL 0x524
-
-#define PRCM_SVAMMDSPCLK_MGT 0x008
-#define PRCM_SIAMMDSPCLK_MGT 0x00C
-#define PRCM_SGACLK_MGT 0x014
-#define PRCM_UARTCLK_MGT 0x018
-#define PRCM_MSP02CLK_MGT 0x01C
-#define PRCM_MSP1CLK_MGT 0x288
-#define PRCM_I2CCLK_MGT 0x020
-#define PRCM_SDMMCCLK_MGT 0x024
-#define PRCM_SLIMCLK_MGT 0x028
-#define PRCM_PER1CLK_MGT 0x02C
-#define PRCM_PER2CLK_MGT 0x030
-#define PRCM_PER3CLK_MGT 0x034
-#define PRCM_PER5CLK_MGT 0x038
-#define PRCM_PER6CLK_MGT 0x03C
-#define PRCM_PER7CLK_MGT 0x040
-#define PRCM_LCDCLK_MGT 0x044
-#define PRCM_BMLCLK_MGT 0x04C
-#define PRCM_HSITXCLK_MGT 0x050
-#define PRCM_HSIRXCLK_MGT 0x054
-#define PRCM_HDMICLK_MGT 0x058
-#define PRCM_APEATCLK_MGT 0x05C
-#define PRCM_APETRACECLK_MGT 0x060
-#define PRCM_MCDECLK_MGT 0x064
-#define PRCM_IPI2CCLK_MGT 0x068
-#define PRCM_DSIALTCLK_MGT 0x06C
-#define PRCM_DMACLK_MGT 0x074
-#define PRCM_B2R2CLK_MGT 0x078
-#define PRCM_TVCLK_MGT 0x07C
-#define PRCM_TCR 0x1C8
-#define PRCM_TCR_STOPPED (1 << 16)
-#define PRCM_TCR_DOZE_MODE (1 << 17)
-#define PRCM_UNIPROCLK_MGT 0x278
-#define PRCM_SSPCLK_MGT 0x280
-#define PRCM_RNGCLK_MGT 0x284
-#define PRCM_UICCCLK_MGT 0x27C
-
-#define PRCM_MGT_ENABLE (1 << 8)
-
-static DEFINE_SPINLOCK(clocks_lock);
-
-static void __clk_enable(struct clk *clk)
-{
- if (clk->enabled++ == 0) {
- if (clk->parent_cluster)
- __clk_enable(clk->parent_cluster);
+DEFINE_MUTEX(clk_opp100_mutex);
+static DEFINE_SPINLOCK(clk_spin_lock);
+#define NO_LOCK &clk_spin_lock
- if (clk->parent_periph)
- __clk_enable(clk->parent_periph);
+static void __iomem *prcmu_base;
- if (clk->ops && clk->ops->enable)
- clk->ops->enable(clk);
+static void __clk_lock(struct clk *clk, void *last_lock, unsigned long *flags)
+{
+ if (clk->mutex != last_lock) {
+ if (clk->mutex == NULL)
+ spin_lock_irqsave(&clk_spin_lock, *flags);
+ else
+ mutex_lock(clk->mutex);
}
}
-int clk_enable(struct clk *clk)
+static void __clk_unlock(struct clk *clk, void *last_lock, unsigned long flags)
+{
+ if (clk->mutex != last_lock) {
+ if (clk->mutex == NULL)
+ spin_unlock_irqrestore(&clk_spin_lock, flags);
+ else
+ mutex_unlock(clk->mutex);
+ }
+}
+
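+/*
+ * Locking note: each operation takes the lock of the clock it works on
+ * (clk->mutex, or the common spinlock when no mutex is set) and passes it
+ * down as current_lock/last_lock when recursing into a parent, so a parent
+ * protected by the same lock is not taken a second time.
+ */
+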
+void __clk_disable(struct clk *clk, void *current_lock)
{
unsigned long flags;
- spin_lock_irqsave(&clocks_lock, flags);
- __clk_enable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
+ if (clk == NULL)
+ return;
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
+ __clk_lock(clk, current_lock, &flags);
-static void __clk_disable(struct clk *clk)
-{
- if (--clk->enabled == 0) {
- if (clk->ops && clk->ops->disable)
+ if (clk->enabled && (--clk->enabled == 0)) {
+ if ((clk->ops != NULL) && (clk->ops->disable != NULL))
clk->ops->disable(clk);
+ __clk_disable(clk->parent, clk->mutex);
+ __clk_disable(clk->bus_parent, clk->mutex);
+ }
- if (clk->parent_periph)
- __clk_disable(clk->parent_periph);
+ __clk_unlock(clk, current_lock, flags);
- if (clk->parent_cluster)
- __clk_disable(clk->parent_cluster);
- }
+ return;
}
-void clk_disable(struct clk *clk)
+int __clk_enable(struct clk *clk, void *current_lock)
{
+ int err;
unsigned long flags;
- WARN_ON(!clk->enabled);
+ if (clk == NULL)
+ return 0;
- spin_lock_irqsave(&clocks_lock, flags);
- __clk_disable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
+ __clk_lock(clk, current_lock, &flags);
-/*
- * The MTU has a separate, rather complex muxing setup
- * with alternative parents (peripheral cluster or
- * ULP or fixed 32768 Hz) depending on settings
- */
-static unsigned long clk_mtu_get_rate(struct clk *clk)
-{
- void __iomem *addr;
- u32 tcr;
- int mtu = (int) clk->data;
- /*
- * One of these is selected eventually
- * TODO: Replace the constant with a reference
- * to the ULP source once this is modeled.
- */
- unsigned long clk32k = 32768;
- unsigned long mturate;
- unsigned long retclk;
-
- if (cpu_is_u5500())
- addr = __io_address(U5500_PRCMU_BASE);
- else if (cpu_is_u8500())
- addr = __io_address(U8500_PRCMU_BASE);
- else
- ux500_unknown_soc();
+ if (!clk->enabled) {
+ err = __clk_enable(clk->bus_parent, clk->mutex);
+ if (unlikely(err))
+ goto bus_parent_error;
- /*
- * On a startup, always conifgure the TCR to the doze mode;
- * bootloaders do it for us. Do this in the kernel too.
- */
- writel(PRCM_TCR_DOZE_MODE, addr + PRCM_TCR);
+ err = __clk_enable(clk->parent, clk->mutex);
+ if (unlikely(err))
+ goto parent_error;
- tcr = readl(addr + PRCM_TCR);
+ if ((clk->ops != NULL) && (clk->ops->enable != NULL)) {
+ err = clk->ops->enable(clk);
+ if (unlikely(err))
+ goto enable_error;
+ }
+ }
+ clk->enabled++;
- /* Get the rate from the parent as a default */
- if (clk->parent_periph)
- mturate = clk_get_rate(clk->parent_periph);
- else if (clk->parent_cluster)
- mturate = clk_get_rate(clk->parent_cluster);
- else
- /* We need to be connected SOMEWHERE */
- BUG();
+ __clk_unlock(clk, current_lock, flags);
- /* Return the clock selected for this MTU */
- if (tcr & (1 << mtu))
- retclk = clk32k;
- else
- retclk = mturate;
+ return 0;
+
+enable_error:
+ __clk_disable(clk->parent, clk->mutex);
+parent_error:
+ __clk_disable(clk->bus_parent, clk->mutex);
+bus_parent_error:
+
+ __clk_unlock(clk, current_lock, flags);
- pr_info("MTU%d clock rate: %lu Hz\n", mtu, retclk);
- return retclk;
+ return err;
}
-unsigned long clk_get_rate(struct clk *clk)
+unsigned long __clk_get_rate(struct clk *clk, void *current_lock)
{
unsigned long rate;
+ unsigned long flags;
- /*
- * If there is a custom getrate callback for this clock,
- * it will take precedence.
- */
- if (clk->get_rate)
- return clk->get_rate(clk);
-
- if (clk->ops && clk->ops->get_rate)
- return clk->ops->get_rate(clk);
-
- rate = clk->rate;
- if (!rate) {
- if (clk->parent_periph)
- rate = clk_get_rate(clk->parent_periph);
- else if (clk->parent_cluster)
- rate = clk_get_rate(clk->parent_cluster);
- }
+ if (clk == NULL)
+ return 0;
+
+ __clk_lock(clk, current_lock, &flags);
+
+ if ((clk->ops != NULL) && (clk->ops->get_rate != NULL))
+ rate = clk->ops->get_rate(clk);
+ else if (clk->rate)
+ rate = clk->rate;
+ else
+ rate = __clk_get_rate(clk->parent, clk->mutex);
+
+ __clk_unlock(clk, current_lock, flags);
return rate;
}
-EXPORT_SYMBOL(clk_get_rate);
-long clk_round_rate(struct clk *clk, unsigned long rate)
+static long __clk_round_rate(struct clk *clk, unsigned long rate)
{
- /*TODO*/
- return rate;
+ if ((clk->ops != NULL) && (clk->ops->round_rate != NULL))
+ return clk->ops->round_rate(clk, rate);
+
+ return -ENOSYS;
}
-EXPORT_SYMBOL(clk_round_rate);
-int clk_set_rate(struct clk *clk, unsigned long rate)
+static int __clk_set_rate(struct clk *clk, unsigned long rate)
{
- clk->rate = rate;
- return 0;
+ if ((clk->ops != NULL) && (clk->ops->set_rate != NULL))
+ return clk->ops->set_rate(clk, rate);
+
+ return -ENOSYS;
}
-EXPORT_SYMBOL(clk_set_rate);
-static void clk_prcmu_enable(struct clk *clk)
+int clk_enable(struct clk *clk)
{
- void __iomem *cg_set_reg = __io_address(U8500_PRCMU_BASE)
- + PRCM_YYCLKEN0_MGT_SET + clk->prcmu_cg_off;
+ if (clk == NULL)
+ return -EINVAL;
- writel(1 << clk->prcmu_cg_bit, cg_set_reg);
+ return __clk_enable(clk, NO_LOCK);
}
+EXPORT_SYMBOL(clk_enable);
-static void clk_prcmu_disable(struct clk *clk)
+void clk_disable(struct clk *clk)
{
- void __iomem *cg_clr_reg = __io_address(U8500_PRCMU_BASE)
- + PRCM_YYCLKEN0_MGT_CLR + clk->prcmu_cg_off;
- writel(1 << clk->prcmu_cg_bit, cg_clr_reg);
+ if (clk == NULL)
+ return;
+
+ WARN_ON(!clk->enabled);
+ __clk_disable(clk, NO_LOCK);
}
+EXPORT_SYMBOL(clk_disable);
-/* ED doesn't have the combined set/clr registers */
-static void clk_prcmu_ed_enable(struct clk *clk)
+unsigned long clk_get_rate(struct clk *clk)
{
- void __iomem *addr = __io_address(U8500_PRCMU_BASE)
- + clk->prcmu_cg_mgt;
+ if (clk == NULL)
+ return 0;
- writel(readl(addr) | PRCM_MGT_ENABLE, addr);
+ return __clk_get_rate(clk, NO_LOCK);
}
+EXPORT_SYMBOL(clk_get_rate);
-static void clk_prcmu_ed_disable(struct clk *clk)
+long clk_round_rate(struct clk *clk, unsigned long rate)
{
- void __iomem *addr = __io_address(U8500_PRCMU_BASE)
- + clk->prcmu_cg_mgt;
+ long rounded_rate;
+ unsigned long flags;
+
+ if (clk == NULL)
+ return -EINVAL;
- writel(readl(addr) & ~PRCM_MGT_ENABLE, addr);
+ __clk_lock(clk, NO_LOCK, &flags);
+
+ rounded_rate = __clk_round_rate(clk, rate);
+
+ __clk_unlock(clk, NO_LOCK, flags);
+
+ return rounded_rate;
}
+EXPORT_SYMBOL(clk_round_rate);
-static struct clkops clk_prcmu_ops = {
- .enable = clk_prcmu_enable,
- .disable = clk_prcmu_disable,
-};
+long clk_round_rate_rec(struct clk *clk, unsigned long rate)
+{
+ long rounded_rate;
+ unsigned long flags;
-static unsigned int clkrst_base[] = {
- [1] = U8500_CLKRST1_BASE,
- [2] = U8500_CLKRST2_BASE,
- [3] = U8500_CLKRST3_BASE,
- [5] = U8500_CLKRST5_BASE,
- [6] = U8500_CLKRST6_BASE,
- [7] = U8500_CLKRST7_BASE_ED,
-};
+ if ((clk == NULL) || (clk->parent == NULL))
+ return -EINVAL;
+
+ __clk_lock(clk->parent, clk->mutex, &flags);
+
+ rounded_rate = __clk_round_rate(clk->parent, rate);
+
+ __clk_unlock(clk->parent, clk->mutex, flags);
-static void clk_prcc_enable(struct clk *clk)
+ return rounded_rate;
+}
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
{
- void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+ int err;
+ unsigned long flags;
- if (clk->prcc_kernel != -1)
- writel(1 << clk->prcc_kernel, addr + PRCC_KCKEN);
+ if (clk == NULL)
+ return -EINVAL;
- if (clk->prcc_bus != -1)
- writel(1 << clk->prcc_bus, addr + PRCC_PCKEN);
+ __clk_lock(clk, NO_LOCK, &flags);
+
+ err = __clk_set_rate(clk, rate);
+
+ __clk_unlock(clk, NO_LOCK, flags);
+
+ return err;
}
+EXPORT_SYMBOL(clk_set_rate);
-static void clk_prcc_disable(struct clk *clk)
+int clk_set_rate_rec(struct clk *clk, unsigned long rate)
{
- void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+ int err;
+ unsigned long flags;
+
+ if ((clk == NULL) || (clk->parent == NULL))
+ return -EINVAL;
+
+ __clk_lock(clk->parent, clk->mutex, &flags);
+
+ err = __clk_set_rate(clk->parent, rate);
- if (clk->prcc_bus != -1)
- writel(1 << clk->prcc_bus, addr + PRCC_PCKDIS);
+ __clk_unlock(clk->parent, clk->mutex, flags);
- if (clk->prcc_kernel != -1)
- writel(1 << clk->prcc_kernel, addr + PRCC_KCKDIS);
+ return err;
}
-static struct clkops clk_prcc_ops = {
- .enable = clk_prcc_enable,
- .disable = clk_prcc_disable,
-};
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ int err = 0;
+ unsigned long flags;
+ struct clk **p;
-static struct clk clk_32khz = {
- .name = "clk_32khz",
- .rate = 32000,
-};
+ if ((clk == NULL) || (clk->parents == NULL))
+ return -EINVAL;
+ for (p = clk->parents; *p != parent; p++) {
+ if (*p == NULL) /* invalid parent */
+ return -EINVAL;
+ }
-/*
- * PRCMU level clock gating
- */
+ __clk_lock(clk, NO_LOCK, &flags);
-/* Bank 0 */
-static DEFINE_PRCMU_CLK(svaclk, 0x0, 2, SVAMMDSPCLK);
-static DEFINE_PRCMU_CLK(siaclk, 0x0, 3, SIAMMDSPCLK);
-static DEFINE_PRCMU_CLK(sgaclk, 0x0, 4, SGACLK);
-static DEFINE_PRCMU_CLK_RATE(uartclk, 0x0, 5, UARTCLK, 38400000);
-static DEFINE_PRCMU_CLK(msp02clk, 0x0, 6, MSP02CLK);
-static DEFINE_PRCMU_CLK(msp1clk, 0x0, 7, MSP1CLK); /* v1 */
-static DEFINE_PRCMU_CLK_RATE(i2cclk, 0x0, 8, I2CCLK, 48000000);
-static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 100000000);
-static DEFINE_PRCMU_CLK(slimclk, 0x0, 10, SLIMCLK);
-static DEFINE_PRCMU_CLK(per1clk, 0x0, 11, PER1CLK);
-static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
-static DEFINE_PRCMU_CLK(per3clk, 0x0, 13, PER3CLK);
-static DEFINE_PRCMU_CLK(per5clk, 0x0, 14, PER5CLK);
-static DEFINE_PRCMU_CLK_RATE(per6clk, 0x0, 15, PER6CLK, 133330000);
-static DEFINE_PRCMU_CLK_RATE(per7clk, 0x0, 16, PER7CLK, 100000000);
-static DEFINE_PRCMU_CLK(lcdclk, 0x0, 17, LCDCLK);
-static DEFINE_PRCMU_CLK(bmlclk, 0x0, 18, BMLCLK);
-static DEFINE_PRCMU_CLK(hsitxclk, 0x0, 19, HSITXCLK);
-static DEFINE_PRCMU_CLK(hsirxclk, 0x0, 20, HSIRXCLK);
-static DEFINE_PRCMU_CLK(hdmiclk, 0x0, 21, HDMICLK);
-static DEFINE_PRCMU_CLK(apeatclk, 0x0, 22, APEATCLK);
-static DEFINE_PRCMU_CLK(apetraceclk, 0x0, 23, APETRACECLK);
-static DEFINE_PRCMU_CLK(mcdeclk, 0x0, 24, MCDECLK);
-static DEFINE_PRCMU_CLK(ipi2clk, 0x0, 25, IPI2CCLK);
-static DEFINE_PRCMU_CLK(dsialtclk, 0x0, 26, DSIALTCLK); /* v1 */
-static DEFINE_PRCMU_CLK(dmaclk, 0x0, 27, DMACLK);
-static DEFINE_PRCMU_CLK(b2r2clk, 0x0, 28, B2R2CLK);
-static DEFINE_PRCMU_CLK(tvclk, 0x0, 29, TVCLK);
-static DEFINE_PRCMU_CLK(uniproclk, 0x0, 30, UNIPROCLK); /* v1 */
-static DEFINE_PRCMU_CLK_RATE(sspclk, 0x0, 31, SSPCLK, 48000000); /* v1 */
-
-/* Bank 1 */
-static DEFINE_PRCMU_CLK(rngclk, 0x4, 0, RNGCLK); /* v1 */
-static DEFINE_PRCMU_CLK(uiccclk, 0x4, 1, UICCCLK); /* v1 */
+ if ((clk->ops != NULL) && (clk->ops->set_parent != NULL)) {
+ err = clk->ops->set_parent(clk, parent);
+ if (err)
+ goto unlock_and_return;
+ } else if (clk->enabled) {
+ err = __clk_enable(parent, clk->mutex);
+ if (err)
+ goto unlock_and_return;
+ __clk_disable(clk->parent, clk->mutex);
+ }
-/*
- * PRCC level clock gating
- * Format: per#, clk, PCKEN bit, KCKEN bit, parent
- */
+ clk->parent = parent;
-/* Peripheral Cluster #1 */
-static DEFINE_PRCC_CLK(1, i2c4, 10, 9, &clk_i2cclk);
-static DEFINE_PRCC_CLK(1, gpio0, 9, -1, NULL);
-static DEFINE_PRCC_CLK(1, slimbus0, 8, 8, &clk_slimclk);
-static DEFINE_PRCC_CLK(1, spi3_ed, 7, 7, NULL);
-static DEFINE_PRCC_CLK(1, spi3_v1, 7, -1, NULL);
-static DEFINE_PRCC_CLK(1, i2c2, 6, 6, &clk_i2cclk);
-static DEFINE_PRCC_CLK(1, sdi0, 5, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(1, msp1_ed, 4, 4, &clk_msp02clk);
-static DEFINE_PRCC_CLK(1, msp1_v1, 4, 4, &clk_msp1clk);
-static DEFINE_PRCC_CLK(1, msp0, 3, 3, &clk_msp02clk);
-static DEFINE_PRCC_CLK(1, i2c1, 2, 2, &clk_i2cclk);
-static DEFINE_PRCC_CLK(1, uart1, 1, 1, &clk_uartclk);
-static DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk);
-
-/* Peripheral Cluster #2 */
-
-static DEFINE_PRCC_CLK(2, gpio1_ed, 12, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssitx_ed, 11, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssirx_ed, 10, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi0_ed, 9, -1, NULL);
-static DEFINE_PRCC_CLK(2, sdi3_ed, 8, 6, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, sdi1_ed, 7, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, msp2_ed, 6, 4, &clk_msp02clk);
-static DEFINE_PRCC_CLK(2, sdi4_ed, 4, 2, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, pwl_ed, 3, 1, NULL);
-static DEFINE_PRCC_CLK(2, spi1_ed, 2, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi2_ed, 1, -1, NULL);
-static DEFINE_PRCC_CLK(2, i2c3_ed, 0, 0, &clk_i2cclk);
-
-static DEFINE_PRCC_CLK(2, gpio1_v1, 11, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssitx_v1, 10, 7, NULL);
-static DEFINE_PRCC_CLK(2, ssirx_v1, 9, 6, NULL);
-static DEFINE_PRCC_CLK(2, spi0_v1, 8, -1, NULL);
-static DEFINE_PRCC_CLK(2, sdi3_v1, 7, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, sdi1_v1, 6, 4, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, msp2_v1, 5, 3, &clk_msp02clk);
-static DEFINE_PRCC_CLK(2, sdi4_v1, 4, 2, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, pwl_v1, 3, 1, NULL);
-static DEFINE_PRCC_CLK(2, spi1_v1, 2, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi2_v1, 1, -1, NULL);
-static DEFINE_PRCC_CLK(2, i2c3_v1, 0, 0, &clk_i2cclk);
-
-/* Peripheral Cluster #3 */
-static DEFINE_PRCC_CLK(3, gpio2, 8, -1, NULL);
-static DEFINE_PRCC_CLK(3, sdi5, 7, 7, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(3, uart2, 6, 6, &clk_uartclk);
-static DEFINE_PRCC_CLK(3, ske, 5, 5, &clk_32khz);
-static DEFINE_PRCC_CLK(3, sdi2, 4, 4, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(3, i2c0, 3, 3, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp1_ed, 2, 2, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp0_ed, 1, 1, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp1_v1, 2, 2, &clk_sspclk);
-static DEFINE_PRCC_CLK(3, ssp0_v1, 1, 1, &clk_sspclk);
-static DEFINE_PRCC_CLK(3, fsmc, 0, -1, NULL);
-
-/* Peripheral Cluster #4 is in the always on domain */
-
-/* Peripheral Cluster #5 */
-static DEFINE_PRCC_CLK(5, gpio3, 1, -1, NULL);
-static DEFINE_PRCC_CLK(5, usb_ed, 0, 0, &clk_i2cclk);
-static DEFINE_PRCC_CLK(5, usb_v1, 0, 0, NULL);
-
-/* Peripheral Cluster #6 */
-
-/* MTU ID in data */
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu1_v1, 8, -1, NULL, clk_mtu_get_rate, 1);
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu0_v1, 7, -1, NULL, clk_mtu_get_rate, 0);
-static DEFINE_PRCC_CLK(6, cfgreg_v1, 6, 6, NULL);
-static DEFINE_PRCC_CLK(6, dmc_ed, 6, 6, NULL);
-static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
-static DEFINE_PRCC_CLK(6, unipro_v1, 4, 1, &clk_uniproclk);
-static DEFINE_PRCC_CLK(6, cryp1_ed, 4, -1, NULL);
-static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
-static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
-static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
-static DEFINE_PRCC_CLK(6, rng_ed, 0, 0, &clk_i2cclk);
-static DEFINE_PRCC_CLK(6, rng_v1, 0, 0, &clk_rngclk);
-
-/* Peripheral Cluster #7 */
-
-static DEFINE_PRCC_CLK(7, tzpc0_ed, 4, -1, NULL);
-/* MTU ID in data */
-static DEFINE_PRCC_CLK_CUSTOM(7, mtu1_ed, 3, -1, NULL, clk_mtu_get_rate, 1);
-static DEFINE_PRCC_CLK_CUSTOM(7, mtu0_ed, 2, -1, NULL, clk_mtu_get_rate, 0);
-static DEFINE_PRCC_CLK(7, wdg_ed, 1, -1, NULL);
-static DEFINE_PRCC_CLK(7, cfgreg_ed, 0, -1, NULL);
-
-static struct clk clk_dummy_apb_pclk = {
- .name = "apb_pclk",
-};
+unlock_and_return:
+ __clk_unlock(clk, NO_LOCK, flags);
-static struct clk_lookup u8500_common_clks[] = {
- CLK(dummy_apb_pclk, NULL, "apb_pclk"),
-
- /* Peripheral Cluster #1 */
- CLK(gpio0, "gpio.0", NULL),
- CLK(gpio0, "gpio.1", NULL),
- CLK(slimbus0, "slimbus0", NULL),
- CLK(i2c2, "nmk-i2c.2", NULL),
- CLK(sdi0, "sdi0", NULL),
- CLK(msp0, "msp0", NULL),
- CLK(i2c1, "nmk-i2c.1", NULL),
- CLK(uart1, "uart1", NULL),
- CLK(uart0, "uart0", NULL),
-
- /* Peripheral Cluster #3 */
- CLK(gpio2, "gpio.2", NULL),
- CLK(gpio2, "gpio.3", NULL),
- CLK(gpio2, "gpio.4", NULL),
- CLK(gpio2, "gpio.5", NULL),
- CLK(sdi5, "sdi5", NULL),
- CLK(uart2, "uart2", NULL),
- CLK(ske, "ske", NULL),
- CLK(ske, "nmk-ske-keypad", NULL),
- CLK(sdi2, "sdi2", NULL),
- CLK(i2c0, "nmk-i2c.0", NULL),
- CLK(fsmc, "fsmc", NULL),
-
- /* Peripheral Cluster #5 */
- CLK(gpio3, "gpio.8", NULL),
-
- /* Peripheral Cluster #6 */
- CLK(hash1, "hash1", NULL),
- CLK(pka, "pka", NULL),
- CLK(hash0, "hash0", NULL),
- CLK(cryp0, "cryp0", NULL),
-
- /* PRCMU level clock gating */
-
- /* Bank 0 */
- CLK(svaclk, "sva", NULL),
- CLK(siaclk, "sia", NULL),
- CLK(sgaclk, "sga", NULL),
- CLK(slimclk, "slim", NULL),
- CLK(lcdclk, "lcd", NULL),
- CLK(bmlclk, "bml", NULL),
- CLK(hsitxclk, "stm-hsi.0", NULL),
- CLK(hsirxclk, "stm-hsi.1", NULL),
- CLK(hdmiclk, "hdmi", NULL),
- CLK(apeatclk, "apeat", NULL),
- CLK(apetraceclk, "apetrace", NULL),
- CLK(mcdeclk, "mcde", NULL),
- CLK(ipi2clk, "ipi2", NULL),
- CLK(dmaclk, "dma40.0", NULL),
- CLK(b2r2clk, "b2r2", NULL),
- CLK(tvclk, "tv", NULL),
-};
+ return err;
+}
-static struct clk_lookup u8500_ed_clks[] = {
- /* Peripheral Cluster #1 */
- CLK(spi3_ed, "spi3", NULL),
- CLK(msp1_ed, "msp1", NULL),
-
- /* Peripheral Cluster #2 */
- CLK(gpio1_ed, "gpio.6", NULL),
- CLK(gpio1_ed, "gpio.7", NULL),
- CLK(ssitx_ed, "ssitx", NULL),
- CLK(ssirx_ed, "ssirx", NULL),
- CLK(spi0_ed, "spi0", NULL),
- CLK(sdi3_ed, "sdi3", NULL),
- CLK(sdi1_ed, "sdi1", NULL),
- CLK(msp2_ed, "msp2", NULL),
- CLK(sdi4_ed, "sdi4", NULL),
- CLK(pwl_ed, "pwl", NULL),
- CLK(spi1_ed, "spi1", NULL),
- CLK(spi2_ed, "spi2", NULL),
- CLK(i2c3_ed, "nmk-i2c.3", NULL),
-
- /* Peripheral Cluster #3 */
- CLK(ssp1_ed, "ssp1", NULL),
- CLK(ssp0_ed, "ssp0", NULL),
-
- /* Peripheral Cluster #5 */
- CLK(usb_ed, "musb-ux500.0", "usb"),
-
- /* Peripheral Cluster #6 */
- CLK(dmc_ed, "dmc", NULL),
- CLK(cryp1_ed, "cryp1", NULL),
- CLK(rng_ed, "rng", NULL),
-
- /* Peripheral Cluster #7 */
- CLK(tzpc0_ed, "tzpc0", NULL),
- CLK(mtu1_ed, "mtu1", NULL),
- CLK(mtu0_ed, "mtu0", NULL),
- CLK(wdg_ed, "wdg", NULL),
- CLK(cfgreg_ed, "cfgreg", NULL),
-};
+/* PRCMU clock operations. */
-static struct clk_lookup u8500_v1_clks[] = {
- /* Peripheral Cluster #1 */
- CLK(i2c4, "nmk-i2c.4", NULL),
- CLK(spi3_v1, "spi3", NULL),
- CLK(msp1_v1, "msp1", NULL),
-
- /* Peripheral Cluster #2 */
- CLK(gpio1_v1, "gpio.6", NULL),
- CLK(gpio1_v1, "gpio.7", NULL),
- CLK(ssitx_v1, "ssitx", NULL),
- CLK(ssirx_v1, "ssirx", NULL),
- CLK(spi0_v1, "spi0", NULL),
- CLK(sdi3_v1, "sdi3", NULL),
- CLK(sdi1_v1, "sdi1", NULL),
- CLK(msp2_v1, "msp2", NULL),
- CLK(sdi4_v1, "sdi4", NULL),
- CLK(pwl_v1, "pwl", NULL),
- CLK(spi1_v1, "spi1", NULL),
- CLK(spi2_v1, "spi2", NULL),
- CLK(i2c3_v1, "nmk-i2c.3", NULL),
-
- /* Peripheral Cluster #3 */
- CLK(ssp1_v1, "ssp1", NULL),
- CLK(ssp0_v1, "ssp0", NULL),
-
- /* Peripheral Cluster #5 */
- CLK(usb_v1, "musb-ux500.0", "usb"),
-
- /* Peripheral Cluster #6 */
- CLK(mtu1_v1, "mtu1", NULL),
- CLK(mtu0_v1, "mtu0", NULL),
- CLK(cfgreg_v1, "cfgreg", NULL),
- CLK(hash1, "hash1", NULL),
- CLK(unipro_v1, "unipro", NULL),
- CLK(rng_v1, "rng", NULL),
-
- /* PRCMU level clock gating */
-
- /* Bank 0 */
- CLK(uniproclk, "uniproclk", NULL),
- CLK(dsialtclk, "dsialt", NULL),
-
- /* Bank 1 */
- CLK(rngclk, "rng", NULL),
- CLK(uiccclk, "uicc", NULL),
-};
+static int prcmu_clk_enable(struct clk *clk)
+{
+ return prcmu_request_clock(clk->cg_sel, true);
+}
-#ifdef CONFIG_DEBUG_FS
-/*
- * debugfs support to trace clock tree hierarchy and attributes with
- * powerdebug
- */
-static struct dentry *clk_debugfs_root;
+static void prcmu_clk_disable(struct clk *clk)
+{
+ if (prcmu_request_clock(clk->cg_sel, false)) {
+ pr_err("clock: %s failed to disable %s.\n", __func__,
+ clk->name);
+ }
+}
-void __init clk_debugfs_add_table(struct clk_lookup *cl, size_t num)
+static int request_ape_opp100(bool enable)
{
- while (num--) {
- /* Check that the clock has not been already registered */
- if (!(cl->clk->list.prev != cl->clk->list.next))
- list_add_tail(&cl->clk->list, &clk_list);
+ static unsigned int requests;
+
+ if (enable) {
+ if (0 == requests++) {
+ return prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ "clock", 100);
+ }
+ } else if (1 == requests--) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "clock");
+ }
+ return 0;
+}
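+
+/*
+ * The static counter above keeps a single shared PRCMU_QOS_APE_OPP
+ * requirement ("clock", 100%) alive as long as at least one OPP100
+ * dependent clock is enabled; the last disable removes it again.
+ */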
- cl++;
+static int prcmu_opp100_clk_enable(struct clk *clk)
+{
+ int r;
+
+ r = request_ape_opp100(true);
+ if (r) {
+ pr_err("clock: %s failed to request APE OPP 100%% for %s.\n",
+ __func__, clk->name);
+ return r;
}
+ return prcmu_request_clock(clk->cg_sel, true);
}
-static ssize_t usecount_dbg_read(struct file *file, char __user *buf,
- size_t size, loff_t *off)
+static void prcmu_opp100_clk_disable(struct clk *clk)
{
- struct clk *clk = file->f_dentry->d_inode->i_private;
- char cusecount[128];
- unsigned int len;
+ if (prcmu_request_clock(clk->cg_sel, false))
+ goto out_error;
+ if (request_ape_opp100(false))
+ goto out_error;
+ return;
+
+out_error:
+ pr_err("clock: %s failed to disable %s.\n", __func__, clk->name);
+}
- len = sprintf(cusecount, "%u\n", clk->enabled);
- return simple_read_from_buffer(buf, size, off, cusecount, len);
+static unsigned long prcmu_clk_get_rate(struct clk *clk)
+{
+ return prcmu_clock_rate(clk->cg_sel);
}
-static ssize_t rate_dbg_read(struct file *file, char __user *buf,
- size_t size, loff_t *off)
+static long prcmu_clk_round_rate(struct clk *clk, unsigned long rate)
{
- struct clk *clk = file->f_dentry->d_inode->i_private;
- char crate[128];
- unsigned int rate;
- unsigned int len;
-
- rate = clk_get_rate(clk);
- len = sprintf(crate, "%u\n", rate);
- return simple_read_from_buffer(buf, size, off, crate, len);
+ return prcmu_round_clock_rate(clk->cg_sel, rate);
}
-static const struct file_operations usecount_fops = {
- .read = usecount_dbg_read,
+static int prcmu_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return prcmu_set_clock_rate(clk->cg_sel, rate);
+}
+
+struct clkops prcmu_clk_ops = {
+ .enable = prcmu_clk_enable,
+ .disable = prcmu_clk_disable,
+ .get_rate = prcmu_clk_get_rate,
};
-static const struct file_operations set_rate_fops = {
- .read = rate_dbg_read,
+struct clkops prcmu_scalable_clk_ops = {
+ .enable = prcmu_clk_enable,
+ .disable = prcmu_clk_disable,
+ .get_rate = prcmu_clk_get_rate,
+ .round_rate = prcmu_clk_round_rate,
+ .set_rate = prcmu_clk_set_rate,
};
-static struct dentry *clk_debugfs_register_dir(struct clk *c,
- struct dentry *p_dentry)
-{
- struct dentry *d, *clk_d;
- const char *p = c->name;
-
- if (!p)
- p = "BUG";
-
- clk_d = debugfs_create_dir(p, p_dentry);
- if (!clk_d)
- return NULL;
-
- d = debugfs_create_file("usecount", S_IRUGO,
- clk_d, c, &usecount_fops);
- if (!d)
- goto err_out;
- d = debugfs_create_file("rate", S_IRUGO,
- clk_d, c, &set_rate_fops);
- if (!d)
- goto err_out;
- /*
- * TODO : not currently available in ux500
- * d = debugfs_create_x32("flags", S_IRUGO, clk_d, (u32 *)&c->flags);
- * if (!d)
- * goto err_out;
- */
-
- return clk_d;
-
-err_out:
- debugfs_remove_recursive(clk_d);
- return NULL;
-}
+struct clkops prcmu_opp100_clk_ops = {
+ .enable = prcmu_opp100_clk_enable,
+ .disable = prcmu_opp100_clk_disable,
+ .get_rate = prcmu_clk_get_rate,
+};
+
+/* PRCC clock operations. */
-static int clk_debugfs_register_one(struct clk *c)
+static int prcc_pclk_enable(struct clk *clk)
{
- struct clk *pa = c->parent_periph;
- struct clk *bpa = c->parent_cluster;
-
- if (!(bpa && !pa)) {
- c->dent = clk_debugfs_register_dir(c,
- pa ? pa->dent : clk_debugfs_root);
- if (!c->dent)
- return -ENOMEM;
- }
+ void __iomem *io_base = __io_address(clk->io_base);
- if (bpa) {
- c->dent_bus = clk_debugfs_register_dir(c,
- bpa->dent_bus ? bpa->dent_bus : bpa->dent);
- if ((!c->dent_bus) && (c->dent)) {
- debugfs_remove_recursive(c->dent);
- c->dent = NULL;
- return -ENOMEM;
- }
- }
+ writel(clk->cg_sel, (io_base + PRCC_PCKEN));
+ while (!(readl(io_base + PRCC_PCKSR) & clk->cg_sel))
+ cpu_relax();
return 0;
}
-static int clk_debugfs_register(struct clk *c)
+static void prcc_pclk_disable(struct clk *clk)
{
- int err;
- struct clk *pa = c->parent_periph;
- struct clk *bpa = c->parent_cluster;
-
- if (pa && (!pa->dent && !pa->dent_bus)) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
- }
+ void __iomem *io_base = __io_address(clk->io_base);
- if (bpa && (!bpa->dent && !bpa->dent_bus)) {
- err = clk_debugfs_register(bpa);
- if (err)
- return err;
- }
-
- if ((!c->dent) && (!c->dent_bus)) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
+ writel(clk->cg_sel, (io_base + PRCC_PCKDIS));
}
-static int __init clk_debugfs_init(void)
+struct clkops prcc_pclk_ops = {
+ .enable = prcc_pclk_enable,
+ .disable = prcc_pclk_disable,
+};
+
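+/*
+ * Kernel clock (KCK) gating: clk->clock (the clock needed to control this
+ * clock, see struct clk) is enabled around the KCKEN/KCKDIS register
+ * accesses below and released again afterwards.
+ */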
+static int prcc_kclk_enable(struct clk *clk)
{
- struct clk *c;
- struct dentry *d;
int err;
+ void __iomem *io_base = __io_address(clk->io_base);
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
+ err = __clk_enable(clk->clock, clk->mutex);
+ if (err)
+ return err;
- list_for_each_entry(c, &clk_list, list) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
- return 0;
-err_out:
- debugfs_remove_recursive(clk_debugfs_root);
- return err;
-}
+ writel(clk->cg_sel, (io_base + PRCC_KCKEN));
+ while (!(readl(io_base + PRCC_KCKSR) & clk->cg_sel))
+ cpu_relax();
-late_initcall(clk_debugfs_init);
-#endif /* defined(CONFIG_DEBUG_FS) */
+ __clk_disable(clk->clock, clk->mutex);
-unsigned long clk_smp_twd_rate = 400000000;
+ return 0;
+}
-unsigned long clk_smp_twd_get_rate(struct clk *clk)
+static void prcc_kclk_disable(struct clk *clk)
{
- return clk_smp_twd_rate;
+ void __iomem *io_base = __io_address(clk->io_base);
+
+ (void)__clk_enable(clk->clock, clk->mutex);
+ writel(clk->cg_sel, (io_base + PRCC_KCKDIS));
+ __clk_disable(clk->clock, clk->mutex);
}
-static struct clk clk_smp_twd = {
- .get_rate = clk_smp_twd_get_rate,
- .name = "smp_twd",
+struct clkops prcc_kclk_ops = {
+ .enable = prcc_kclk_enable,
+ .disable = prcc_kclk_disable,
};
-static struct clk_lookup clk_smp_twd_lookup = {
- .dev_id = "smp_twd",
- .clk = &clk_smp_twd,
+struct clkops prcc_kclk_rec_ops = {
+ .enable = prcc_kclk_enable,
+ .disable = prcc_kclk_disable,
+ .round_rate = clk_round_rate_rec,
+ .set_rate = clk_set_rate_rec,
};
#ifdef CONFIG_CPU_FREQ
+extern unsigned long dbx500_cpufreq_getfreq(void);
-static int clk_twd_cpufreq_transition(struct notifier_block *nb,
- unsigned long state, void *data)
+unsigned long clk_smp_twd_get_rate(struct clk *clk)
{
- struct cpufreq_freqs *f = data;
-
- if (state == CPUFREQ_PRECHANGE) {
- /* Save frequency in simple Hz */
- clk_smp_twd_rate = f->new * 1000;
- }
-
- return NOTIFY_OK;
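+	/* The TWD local timer runs at half the CPU frequency, hence the /2. */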
+ return dbx500_cpufreq_getfreq() / 2;
}
-static struct notifier_block clk_twd_cpufreq_nb = {
- .notifier_call = clk_twd_cpufreq_transition,
+static struct clkops clk_smp_twd_ops = {
+ .get_rate = clk_smp_twd_get_rate,
};
-static int clk_init_smp_twd_cpufreq(void)
-{
- return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-late_initcall(clk_init_smp_twd_cpufreq);
+static struct clk clk_smp_twd = {
+ .name = "smp_twd",
+ .ops = &clk_smp_twd_ops,
+};
+static struct clk_lookup clk_smp_twd_lookup = {
+ .clk = &clk_smp_twd,
+ .dev_id = "smp_twd",
+};
#endif
int __init clk_init(void)
{
- if (cpu_is_u8500ed()) {
- clk_prcmu_ops.enable = clk_prcmu_ed_enable;
- clk_prcmu_ops.disable = clk_prcmu_ed_disable;
- clk_per6clk.rate = 100000000;
+ if (cpu_is_u8500()) {
+ prcmu_base = __io_address(U8500_PRCMU_BASE);
} else if (cpu_is_u5500()) {
- /* Clock tree for U5500 not implemented yet */
- clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
- clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
- clk_uartclk.rate = 36360000;
- clk_sdmmcclk.rate = 99900000;
+ prcmu_base = __io_address(U5500_PRCMU_BASE);
+ } else {
+ pr_err("clock: Unknown DB Asic.\n");
+ return -EIO;
}
- clkdev_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));
- if (cpu_is_u8500ed())
- clkdev_add_table(u8500_ed_clks, ARRAY_SIZE(u8500_ed_clks));
- else
- clkdev_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks));
+ if (cpu_is_u8500())
+ db8500_clk_init();
+ else if (cpu_is_u5500())
+ db5500_clk_init();
+#ifdef CONFIG_CPU_FREQ
clkdev_add(&clk_smp_twd_lookup);
-
-#ifdef CONFIG_DEBUG_FS
- clk_debugfs_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));
- if (cpu_is_u8500ed())
- clk_debugfs_add_table(u8500_ed_clks, ARRAY_SIZE(u8500_ed_clks));
- else
- clk_debugfs_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks));
#endif
+
return 0;
}
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
index 07449070522..39d8a61d79b 100644
--- a/arch/arm/mach-ux500/clock.h
+++ b/arch/arm/mach-ux500/clock.h
@@ -1,11 +1,55 @@
/*
- * Copyright (C) 2010 ST-Ericsson
+ * Copyright (C) 2010 ST-Ericsson SA
* Copyright (C) 2009 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#ifndef UX500_CLOCK_H
+#define UX500_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clk
+ * @ops: The hardware specific operations defined for the clock.
+ * @name: The name of the clock.
+ * @mutex: The mutex to lock when operating on the clock. %NULL means that
+ * the common clock spinlock will be used.
+ * @enabled: A reference counter of the enable requests for the clock.
+ * @opp100: A flag saying whether the clock is requested to run at the
+ * OPP 100%% frequency.
+ * @rate: The frequency of the clock. For scalable and scaling clocks,
+ * this is the OPP 100%% frequency.
+ * @io_base: An IO memory base address, meaningful only when considered
+ * together with the defined @ops.
+ * @cg_sel: Clock gate selector, meaningful only when considered together
+ * with the specified @ops.
+ * @parent: The current (or only) parent clock of the clock.
+ * @bus_parent: The (optional) auxiliary bus clock "parent" of the clock.
+ * @parents:	A list of the possible parents of the clock. This should be
+ *		a %NULL-terminated array of pointers to &struct clk. Present
+ *		if and only if clk_set_parent() is implemented for the clock.
+ * @regulator: The regulator needed to have the clock functional, if any.
+ * @clock: The clock needed to control the clock, if any.
+ */
+struct clk {
+ const struct clkops *ops;
+ const char *name;
+ struct mutex *mutex;
+ unsigned int enabled;
+ bool opp100;
+ unsigned long rate;
+ unsigned int io_base;
+ u32 cg_sel;
+ struct clk *parent;
+ struct clk *bus_parent;
+ struct clk **parents;
+ struct regulator *regulator;
+ struct clk *clock;
+ struct list_head list;
+};
/**
* struct clkops - ux500 clock operations
@@ -18,134 +62,119 @@
* NULL, the rate in the struct clk will be used.
*/
struct clkops {
- void (*enable) (struct clk *);
- void (*disable) (struct clk *);
- unsigned long (*get_rate) (struct clk *);
+ int (*enable)(struct clk *);
+ void (*disable)(struct clk *);
+ unsigned long (*get_rate)(struct clk *);
+ int (*set_rate)(struct clk *, unsigned long);
+ long (*round_rate)(struct clk *, unsigned long);
+ int (*set_parent)(struct clk *, struct clk *);
};
-/**
- * struct clk - ux500 clock structure
- * @ops: pointer to clkops struct used to control this clock
- * @name: name, for debugging
- * @enabled: refcount. positive if enabled, zero if disabled
- * @get_rate: custom callback for getting the clock rate
- * @data: custom per-clock data for example for the get_rate
- * callback
- * @rate: fixed rate for clocks which don't implement
- * ops->getrate
- * @prcmu_cg_off: address offset of the combined enable/disable register
- * (used on u8500v1)
- * @prcmu_cg_bit: bit in the combined enable/disable register (used on
- * u8500v1)
- * @prcmu_cg_mgt: address of the enable/disable register (used on
- * u8500ed)
- * @cluster: peripheral cluster number
- * @prcc_bus: bit for the bus clock in the peripheral's CLKRST
- * @prcc_kernel: bit for the kernel clock in the peripheral's CLKRST.
- * -1 if no kernel clock exists.
- * @parent_cluster: pointer to parent's cluster clk struct
- * @parent_periph: pointer to parent's peripheral clk struct
- *
- * Peripherals are organised into clusters, and each cluster has an associated
- * bus clock. Some peripherals also have a parent peripheral clock.
- *
- * In order to enable a clock for a peripheral, we need to enable:
- * (1) the parent cluster (bus) clock at the PRCMU level
- * (2) the parent peripheral clock (if any) at the PRCMU level
- * (3) the peripheral's bus & kernel clock at the PRCC level
- *
- * (1) and (2) are handled by defining clk structs (DEFINE_PRCMU_CLK) for each
- * of the cluster and peripheral clocks, and hooking these as the parents of
- * the individual peripheral clocks.
- *
- * (3) is handled by specifying the bits in the PRCC control registers required
- * to enable these clocks and modifying them in the ->enable and
- * ->disable callbacks of the peripheral clocks (DEFINE_PRCC_CLK).
- *
- * This structure describes both the PRCMU-level clocks and PRCC-level clocks.
- * The prcmu_* fields are only used for the PRCMU clocks, and the cluster,
- * prcc, and parent pointers are only used for the PRCC-level clocks.
- */
-struct clk {
- const struct clkops *ops;
- const char *name;
- unsigned int enabled;
- unsigned long (*get_rate)(struct clk *);
- void *data;
-
- unsigned long rate;
- struct list_head list;
+extern struct clkops prcmu_clk_ops;
+extern struct clkops prcmu_scalable_clk_ops;
+extern struct clkops prcmu_opp100_clk_ops;
+extern struct mutex clk_opp100_mutex;
+extern struct clkops prcc_pclk_ops;
+extern struct clkops prcc_kclk_ops;
+extern struct clkops prcc_kclk_rec_ops;
+extern struct clkops sga_clk_ops;
- /* These three are only for PRCMU clks */
+#define CLK_LOOKUP(_clk, _dev_id, _con_id) \
+ { .dev_id = _dev_id, .con_id = _con_id, .clk = &_clk }
- unsigned int prcmu_cg_off;
- unsigned int prcmu_cg_bit;
- unsigned int prcmu_cg_mgt;
-
- /* The rest are only for PRCC clks */
-
- int cluster;
- unsigned int prcc_bus;
- unsigned int prcc_kernel;
-
- struct clk *parent_cluster;
- struct clk *parent_periph;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *dent; /* For visible tree hierarchy */
- struct dentry *dent_bus; /* For visible tree hierarchy */
-#endif
-};
+/* Define PRCMU Clock */
+#define DEF_PRCMU_CLK(_name, _cg_sel, _rate) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ }
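+
+/*
+ * Usage sketch (illustrative only; the clock, gate selector and device
+ * names below are placeholders, not definitions made by this header):
+ * an SoC clock file would typically declare a fixed-rate PRCMU-gated
+ * clock and hook it up through clkdev along these lines:
+ *
+ *	static DEF_PRCMU_CLK(exampleclk, PRCMU_EXAMPLECLK, 48000000);
+ *
+ *	static struct clk_lookup example_clks[] = {
+ *		CLK_LOOKUP(exampleclk, "example-dev.0", NULL),
+ *	};
+ *
+ *	clkdev_add_table(example_clks, ARRAY_SIZE(example_clks));
+ */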
-#define DEFINE_PRCMU_CLK(_name, _cg_off, _cg_bit, _reg) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcmu_ops, \
- .prcmu_cg_off = _cg_off, \
- .prcmu_cg_bit = _cg_bit, \
- .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+#define DEF_PRCMU_SCALABLE_CLK(_name, _cg_sel) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_scalable_clk_ops, \
+ .cg_sel = _cg_sel, \
}
-#define DEFINE_PRCMU_CLK_RATE(_name, _cg_off, _cg_bit, _reg, _rate) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcmu_ops, \
- .prcmu_cg_off = _cg_off, \
- .prcmu_cg_bit = _cg_bit, \
- .rate = _rate, \
- .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+/* Use this for clocks that are only defined at OPP 100%. */
+#define DEF_PRCMU_OPP100_CLK(_name, _cg_sel, _rate) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_opp100_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ .mutex = &clk_opp100_mutex, \
}
-#define DEFINE_PRCC_CLK(_pclust, _name, _bus_en, _kernel_en, _kernclk) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcc_ops, \
- .cluster = _pclust, \
- .prcc_bus = _bus_en, \
- .prcc_kernel = _kernel_en, \
- .parent_cluster = &clk_per##_pclust##clk, \
- .parent_periph = _kernclk \
+/* Define PRCC clock */
+#define DEF_PRCC_PCLK(_name, _io_base, _cg_bit, _parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcc_pclk_ops, \
+ .io_base = _io_base, \
+ .cg_sel = BIT(_cg_bit), \
+ .parent = _parent, \
}
-#define DEFINE_PRCC_CLK_CUSTOM(_pclust, _name, _bus_en, _kernel_en, _kernclk, _callback, _data) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcc_ops, \
- .cluster = _pclust, \
- .prcc_bus = _bus_en, \
- .prcc_kernel = _kernel_en, \
- .parent_cluster = &clk_per##_pclust##clk, \
- .parent_periph = _kernclk, \
- .get_rate = _callback, \
- .data = (void *) _data \
+#define DEF_PRCC_KCLK(_name, _io_base, _cg_bit, _parent, _clock) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcc_kclk_ops, \
+ .io_base = _io_base, \
+ .cg_sel = BIT(_cg_bit), \
+ .parent = _parent, \
+ .clock = _clock, \
}
+#define DEF_PER_CLK(_name, _bus_parent, _parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .parent = _parent, \
+ .bus_parent = _bus_parent, \
+ }
-#define CLK(_clk, _devname, _conname) \
- { \
- .clk = &clk_##_clk, \
- .dev_id = _devname, \
- .con_id = _conname, \
+#define DEF_MTU_CLK(_cg_sel, _name, _bus_parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &mtu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .bus_parent = _bus_parent, \
}
-int __init clk_db8500_ed_fixup(void);
+/* Functions defined in clock.c */
int __init clk_init(void);
+void clks_register(struct clk_lookup *clks, size_t num);
+int __clk_enable(struct clk *clk, void *current_lock);
+void __clk_disable(struct clk *clk, void *current_lock);
+unsigned long __clk_get_rate(struct clk *clk, void *current_lock);
+long clk_round_rate_rec(struct clk *clk, unsigned long rate);
+int clk_set_rate_rec(struct clk *clk, unsigned long rate);
+
+#ifdef CONFIG_DEBUG_FS
+int dbx500_clk_debug_init(struct clk **clks, int num);
+#else
+static inline int dbx500_clk_debug_init(struct clk **clks, int num)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_UX500_SOC_DB8500
+int __init db8500_clk_init(void);
+int __init db8500_clk_debug_init(void);
+#else
+static inline int db8500_clk_init(void) { return 0; }
+static inline int db8500_clk_debug_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_UX500_SOC_DB5500
+int __init db5500_clk_init(void);
+int __init db5500_clk_debug_init(void);
+#else
+static inline int db5500_clk_init(void) { return 0; }
+static inline int db5500_clk_debug_init(void) { return 0; }
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index 9de1af00809..70fa6c0efc9 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -9,20 +9,23 @@
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gpio/nomadik.h>
-#include <asm/mach/map.h>
#include <asm/pmu.h>
+#include <asm/mach/map.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
-#include <plat/gpio-nomadik.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/devices.h>
#include <mach/setup.h>
#include <mach/irqs.h>
#include <mach/usb.h>
+#include <mach/ste-dma40-db5500.h>
#include "devices-db5500.h"
-#include "ste-dma40-db5500.h"
static struct map_desc u5500_uart_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_UART0_BASE, SZ_4K),
@@ -35,8 +38,11 @@ static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_L2CC_BASE, SZ_4K),
__IO_DEV_DESC(U5500_TWD_BASE, SZ_4K),
__IO_DEV_DESC(U5500_MTU0_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_MTU1_BASE, SZ_4K),
__IO_DEV_DESC(U5500_SCU_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_RTC_BASE, SZ_4K),
__IO_DEV_DESC(U5500_BACKUPRAM0_BASE, SZ_8K),
+ __MEM_DEV_DESC(U5500_BOOT_ROM_BASE, SZ_1M),
__IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
__IO_DEV_DESC(U5500_GPIO1_BASE, SZ_4K),
@@ -45,26 +51,11 @@ static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K),
__IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
__IO_DEV_DESC(U5500_PRCMU_TCDM_BASE, SZ_4K),
-};
-
-static struct resource db5500_pmu_resources[] = {
- [0] = {
- .start = IRQ_DB5500_PMU0,
- .end = IRQ_DB5500_PMU0,
- .flags = IORESOURCE_IRQ,
- },
- [1] = {
- .start = IRQ_DB5500_PMU1,
- .end = IRQ_DB5500_PMU1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device db5500_pmu_device = {
- .name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
- .num_resources = ARRAY_SIZE(db5500_pmu_resources),
- .resource = db5500_pmu_resources,
+ __IO_DEV_DESC(U5500_CLKRST1_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST2_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST3_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST5_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST6_BASE, SZ_4K),
};
static struct resource mbox0_resources[] = {
@@ -151,11 +142,15 @@ static struct platform_device mbox2_device = {
.num_resources = ARRAY_SIZE(mbox2_resources),
};
+static struct platform_device db5500_prcmu_device = {
+ .name = "db5500-prcmu",
+};
+
static struct platform_device *db5500_platform_devs[] __initdata = {
- &db5500_pmu_device,
&mbox0_device,
&mbox1_device,
&mbox2_device,
+ &db5500_prcmu_device,
};
static resource_size_t __initdata db5500_gpio_base[] = {
@@ -179,6 +174,38 @@ static void __init db5500_add_gpios(void)
IRQ_DB5500_GPIO0, &pdata);
}
+static u8 db5500_revision;
+
+bool cpu_is_u5500v1(void)
+{
+ return db5500_revision == 0xA0;
+}
+
+bool cpu_is_u5500v2(void)
+{
+ return db5500_revision == 0xB0;
+}
+
+static void db5500_rev_init(void)
+{
+ const char *version = "UNKNOWN";
+ unsigned int asicid;
+
+ /* As in devicemaps_init() */
+ local_flush_tlb_all();
+ flush_cache_all();
+
+ asicid = readl_relaxed(__io_address(U5500_ASIC_ID_ADDRESS));
+ db5500_revision = asicid & 0xff;
+
+ if (cpu_is_u5500v1())
+ version = "1.0";
+ else if (cpu_is_u5500v2())
+ version = "2.0";
+
+ pr_info("DB5500 v%s [%#010x]\n", version, asicid);
+}
+
void __init u5500_map_io(void)
{
/*
@@ -191,6 +218,27 @@ void __init u5500_map_io(void)
iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
_PRCMU_BASE = __io_address(U5500_PRCMU_BASE);
+
+ db5500_rev_init();
+}
+
+static void __init db5500_pmu_init(void)
+{
+ struct resource res[] = {
+ [0] = {
+ .start = IRQ_DB5500_PMU0,
+ .end = IRQ_DB5500_PMU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_DB5500_PMU1,
+ .end = IRQ_DB5500_PMU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ platform_device_register_simple("arm-pmu", ARM_PMU_DEVICE_CPU,
+ res, ARRAY_SIZE(res));
}
static int usb_db5500_rx_dma_cfg[] = {
@@ -217,7 +265,12 @@ static int usb_db5500_tx_dma_cfg[] = {
void __init u5500_init_devices(void)
{
+#ifdef CONFIG_STM_TRACE
+ /* Early init for STM tracing */
+ /* platform_device_register(&u5500_stm_device); */
+#endif
db5500_add_gpios();
+ db5500_pmu_init();
db5500_dma_init();
db5500_add_rtc();
db5500_add_usb(usb_db5500_rx_dma_cfg, usb_db5500_tx_dma_cfg);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 13e8890a8b8..cf051682ca4 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2009 ST-Ericsson
+ * Copyright (C) 2008-2009 ST-Ericsson SA
*
* Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
*
@@ -14,19 +14,23 @@
#include <linux/amba/bus.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/gpio/nomadik.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/sys_soc.h>
#include <asm/mach/map.h>
#include <asm/pmu.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/reboot_reasons.h>
#include <mach/usb.h>
+#include <mach/ste-dma40-db8500.h>
#include "devices-db8500.h"
-#include "ste-dma40-db8500.h"
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_uart_io_desc[] __initdata = {
@@ -40,8 +44,11 @@ static struct map_desc u8500_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K),
__IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_MTU1_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_RTC_BASE, SZ_4K),
__IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
__IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
+ __MEM_DEV_DESC(U8500_BOOT_ROM_BASE, SZ_1M),
__IO_DEV_DESC(U8500_CLKRST1_BASE, SZ_4K),
__IO_DEV_DESC(U8500_CLKRST2_BASE, SZ_4K),
@@ -54,19 +61,6 @@ static struct map_desc u8500_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
-};
-
-static struct map_desc u8500_ed_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_MTU0_BASE_ED, SZ_4K),
- __IO_DEV_DESC(U8500_CLKRST7_BASE_ED, SZ_8K),
-};
-
-static struct map_desc u8500_v1_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_PRCMU_TCDM_BASE_V1, SZ_4K),
-};
-
-static struct map_desc u8500_v2_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_PRCMU_TCDM_BASE, SZ_4K),
};
@@ -77,17 +71,17 @@ void __init u8500_map_io(void)
*/
iotable_init(u8500_uart_io_desc, ARRAY_SIZE(u8500_uart_io_desc));
+ /*
+	 * The STE NMF CM driver, used only on the U8500, allocates memory
+	 * with dma_alloc_coherent:
+	 * 8M for SIA and SVA data + 2M for SIA code + 2M for SVA code
+ */
+ init_consistent_dma_size(SZ_16M);
+
ux500_map_io();
iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
- if (cpu_is_u8500ed())
- iotable_init(u8500_ed_io_desc, ARRAY_SIZE(u8500_ed_io_desc));
- else if (cpu_is_u8500v1())
- iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc));
- else if (cpu_is_u8500v2())
- iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));
-
_PRCMU_BASE = __io_address(U8500_PRCMU_BASE);
}
@@ -136,36 +130,20 @@ static struct platform_device db8500_prcmu_device = {
};
static struct platform_device *platform_devs[] __initdata = {
- &u8500_dma40_device,
+ &u8500_gpio_devs[0],
+ &u8500_gpio_devs[1],
+ &u8500_gpio_devs[2],
+ &u8500_gpio_devs[3],
+ &u8500_gpio_devs[4],
+ &u8500_gpio_devs[5],
+ &u8500_gpio_devs[6],
+ &u8500_gpio_devs[7],
+ &u8500_gpio_devs[8],
&db8500_pmu_device,
&db8500_prcmu_device,
+ &u8500_wdt_device,
};
-static resource_size_t __initdata db8500_gpio_base[] = {
- U8500_GPIOBANK0_BASE,
- U8500_GPIOBANK1_BASE,
- U8500_GPIOBANK2_BASE,
- U8500_GPIOBANK3_BASE,
- U8500_GPIOBANK4_BASE,
- U8500_GPIOBANK5_BASE,
- U8500_GPIOBANK6_BASE,
- U8500_GPIOBANK7_BASE,
- U8500_GPIOBANK8_BASE,
-};
-
-static void __init db8500_add_gpios(void)
-{
- struct nmk_gpio_platform_data pdata = {
- /* No custom data yet */
- };
-
- if (cpu_is_u8500v2())
- pdata.supports_sleepmode = true;
-
- dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
- IRQ_DB8500_GPIO0, &pdata);
-}
-
static int usb_db8500_rx_dma_cfg[] = {
DB8500_DMA_DEV38_USB_OTG_IEP_1_9,
DB8500_DMA_DEV37_USB_OTG_IEP_2_10,
@@ -193,11 +171,13 @@ static int usb_db8500_tx_dma_cfg[] = {
*/
void __init u8500_init_devices(void)
{
- if (cpu_is_u8500ed())
- dma40_u8500ed_fixup();
+#ifdef CONFIG_STM_TRACE
+ /* Early init for STM tracing */
+ platform_device_register(&u8500_stm_device);
+#endif
+ db8500_dma_init();
db8500_add_rtc();
- db8500_add_gpios();
db8500_add_usb(usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
platform_device_register_simple("cpufreq-u8500", -1, NULL, 0);
@@ -205,3 +185,82 @@ void __init u8500_init_devices(void)
return ;
}
+
+#ifdef CONFIG_SYS_SOC
+#define U8500_BB_UID_BASE (U8500_BACKUPRAM1_BASE + 0xFC0)
+#define U8500_BB_UID_LENGTH 5
+
+static ssize_t ux500_get_machine(char *buf, struct sysfs_soc_info *si)
+{
+ return sprintf(buf, "DB%2x00\n", dbx500_id.partnumber);
+}
+
+static ssize_t ux500_get_soc_id(char *buf, struct sysfs_soc_info *si)
+{
+ void __iomem *uid_base;
+ int i;
+ ssize_t sz = 0;
+
+ if (dbx500_id.partnumber == 0x85) {
+ uid_base = __io_address(U8500_BB_UID_BASE);
+ for (i = 0; i < U8500_BB_UID_LENGTH; i++)
+ sz += sprintf(buf + sz, "%08x", readl(uid_base + i * sizeof(u32)));
+ sz += sprintf(buf + sz, "\n");
+	} else {
+ /* Don't know where it is located for U5500 */
+ sz = sprintf(buf, "N/A\n");
+ }
+
+ return sz;
+}
+
+static ssize_t ux500_get_revision(char *buf, struct sysfs_soc_info *si)
+{
+ unsigned int rev = dbx500_id.revision;
+
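+	/* 0x01 is early (ED) silicon; 0xA0, 0xB0, ... map to 1.0, 2.0, ... */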
+ if (rev == 0x01)
+ return sprintf(buf, "%s\n", "ED");
+ else if (rev >= 0xA0)
+		return sprintf(buf, "%d.%d\n", (rev >> 4) - 0xA + 1, rev & 0xf);
+
+ return sprintf(buf, "%s", "Unknown\n");
+}
+
+static ssize_t ux500_get_process(char *buf, struct sysfs_soc_info *si)
+{
+ if (dbx500_id.process == 0x00)
+ return sprintf(buf, "Standard\n");
+
+ return sprintf(buf, "%02xnm\n", dbx500_id.process);
+}
+
+static ssize_t ux500_get_reset_code(char *buf, struct sysfs_soc_info *si)
+{
+ return sprintf(buf, "0x%04x\n", prcmu_get_reset_code());
+}
+
+static ssize_t ux500_get_reset_reason(char *buf, struct sysfs_soc_info *si)
+{
+ return sprintf(buf, "%s\n",
+ reboot_reason_string(prcmu_get_reset_code()));
+}
+
+static struct sysfs_soc_info soc_info[] = {
+ SYSFS_SOC_ATTR_CALLBACK("machine", ux500_get_machine),
+ SYSFS_SOC_ATTR_VALUE("family", "Ux500"),
+ SYSFS_SOC_ATTR_CALLBACK("soc_id", ux500_get_soc_id),
+ SYSFS_SOC_ATTR_CALLBACK("revision", ux500_get_revision),
+ SYSFS_SOC_ATTR_CALLBACK("process", ux500_get_process),
+ SYSFS_SOC_ATTR_CALLBACK("reset_code", ux500_get_reset_code),
+ SYSFS_SOC_ATTR_CALLBACK("reset_reason", ux500_get_reset_reason),
+};
+
+static int __init ux500_sys_soc_init(void)
+{
+ return register_sysfs_soc(soc_info, ARRAY_SIZE(soc_info));
+}
+
+module_init(ux500_sys_soc_init);
+#endif
+
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index f4185749437..cdd16aa76d1 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -11,6 +11,8 @@
#include <linux/mfd/db8500-prcmu.h>
#include <linux/mfd/db5500-prcmu.h>
#include <linux/clksrc-dbx500-prcmu.h>
+#include <linux/delay.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
@@ -19,11 +21,24 @@
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <mach/reboot_reasons.h>
#include "clock.h"
void __iomem *_PRCMU_BASE;
+static void ux500_restart(char mode, const char *cmd)
+{
+ unsigned short reset_code;
+
+ reset_code = reboot_reason_code(cmd);
+ prcmu_system_reset(reset_code);
+
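+	/* Give the PRCMU up to a second to carry out the reset. */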
+ mdelay(1000);
+	printk(KERN_CRIT "Reboot via PRCMU failed -- System halted\n");
+ while (1);
+}
+
void __init ux500_init_irq(void)
{
void __iomem *dist_base;
@@ -48,5 +63,7 @@ void __init ux500_init_irq(void)
db5500_prcmu_early_init();
if (cpu_is_u8500())
db8500_prcmu_early_init();
+
+ arm_pm_restart = ux500_restart;
clk_init();
}
diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c
new file mode 100644
index 00000000000..b117d4e8283
--- /dev/null
+++ b/arch/arm/mach-ux500/dcache.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Cache handler integration and data cache helpers.
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/system.h>
+
+/*
+ * Values are derived from measurements on HREFP_1.1_V32_OM_S10 running
+ * u8500-android-2.2_r1.1_v0.21.
+ *
+ * A lot of time can be spent trying to figure out the perfect breakpoints, but
+ * for now I've chosen the following simple approach:
+ *
+ * breakpoint = best_case + (worst_case - best_case) * 0.666
+ * The breakpoint is moved slightly towards the worst case because a full
+ * clean/flush affects the entire system, so we should be a bit careful.
+ *
+ * BEST CASE:
+ * Best case is that the cache is empty and the system is idling. The case
+ * where the cache contains only targeted data could be better in some cases,
+ * but it's hard to measure and reason about that case, so I chose the
+ * easier alternative.
+ *
+ * inner_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_empty_cache_time)
+ * inner_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_empty_cache_time)
+ *
+ * outer_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_empty_cache_time)
+ * outer_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_empty_cache_time)
+ *
+ * WORST CASE:
+ * Worst case is that the cache is filled with dirty non-targeted data that
+ * will be used after the synchronization and the system is under heavy load.
+ *
+ * inner_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_full_cache_time * 1.5)
+ * Times 1.5 because it runs on both cores half the time.
+ * inner_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_full_cache_time * 1.5 +
+ * complete_flush_on_full_cache_time / 2)
+ * Plus "complete_flush_on_full_cache_time / 2" because all data has to be read
+ * back, here we assume that both cores can fill their cache simultaneously
+ * (seems to be the case as operations on full and empty inner cache takes
+ * roughly the same amount of time ie the bus to outer is not the bottle neck).
+ *
+ * outer_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_full_cache_time +
+ * (complete_clean_on_full_cache_time -
+ * complete_clean_on_empty_cache_time))
+ * Plus "(complete_flush_on_full_cache_time -
+ * complete_flush_on_empty_cache_time)" because no one else can work when we
+ * hog the bus with our unnecessary transfer.
+ * outer_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_full_cache_time * 2 +
+ * (complete_flush_on_full_cache_time -
+ * complete_flush_on_empty_cache_time) * 2)
+ *
+ * These values might have to be updated if changes are made to the CPU, L2$,
+ * memory bus or memory.
+ */
+/* 28930 */
+static const u32 inner_clean_breakpoint = 21324 + (32744 - 21324) * 0.666;
+/* 36224 */
+static const u32 inner_flush_breakpoint = 21324 + (43697 - 21324) * 0.666;
+/* 254069 */
+static const u32 outer_clean_breakpoint = 68041 + (347363 - 68041) * 0.666;
+/* 485414 */
+static const u32 outer_flush_breakpoint = 68041 + (694727 - 68041) * 0.666;
+
+static void __clean_inner_dcache_all(void *param);
+static void clean_inner_dcache_all(void);
+
+static void __flush_inner_dcache_all(void *param);
+static void flush_inner_dcache_all(void);
+
+static bool is_cache_exclusive(void);
+
+void drain_cpu_write_buf(void)
+{
+ dsb();
+ outer_cache.sync();
+}
+
+void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *cleaned_everything)
+{
+ /*
+ * There is no problem with exclusive caches here as the Cortex-A9
+ * documentation (8.1.4. Exclusive L2 cache) says that when a dirty
+ * line is moved from L2 to L1 it is first written to mem. Because
+ * of this there is no way a line can avoid the clean by jumping
+ * between the cache levels.
+ */
+ *cleaned_everything = true;
+
+ if (length < inner_clean_breakpoint) {
+ /* Inner clean range */
+ dmac_map_area(vaddr, length, DMA_TO_DEVICE);
+ *cleaned_everything = false;
+ } else {
+ clean_inner_dcache_all();
+ }
+
+ if (!inner_only) {
+ /*
+ * There is currently no outer_cache.clean_all() so we use
+ * flush instead, which is ok as clean is a subset of flush.
+ * Clean range and flush range take the same amount of time
+ * so we can use outer_flush_breakpoint here.
+ */
+ if (length < outer_flush_breakpoint) {
+ outer_cache.clean_range(paddr, paddr + length);
+ *cleaned_everything = false;
+ } else {
+ outer_cache.flush_all();
+ }
+ }
+}
+
+void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *flushed_everything)
+{
+ /*
+ * There might still be stale data in the caches after this call if the
+ * cache levels are exclusive. The following can happen.
+ * 1. Clean L1 moves the data to L2.
+ * 2. Speculative prefetch, preemption or loads on the other core moves
+ * all the data back to L1, any dirty data will be written to mem as a
+ * result of this.
+ * 3. Flush L2 does nothing as there is no targeted data in L2.
+ * 4. Flush L1 moves the data to L2. Notice that this does not happen
+ * when the cache levels are non-exclusive as clean pages are not
+ * written to L2 in that case.
+ * 5. Stale data is still present in L2!
+ * I see two possible solutions: don't use exclusive caches, or
+ * (temporarily) disable prefetching to L1, preemption and the other
+ * core.
+ *
+ * A situation can occur where the operation does not seem atomic from
+ * the other core's point of view, even on a non-exclusive cache setup.
+ * Replace step 2 in the previous scenario with a write from the other
+ * core. The other core will write on top of the old data but the
+ * result will not be written to memory. One would expect either that
+ * the write was performed on top of the old data and was written to
+ * memory (the write occurred before the flush) or that the write was
+ * performed on top of the new data and was not written to memory (the
+ * write occurred after the flush). The same problem can occur with one
+ * core if kernel preemption is enabled. The solution is to
+ * (temporarily) disable the other core and preemption. I can't think
+ * of any situation where this would be a problem, and disabling the
+ * other core for the duration of this call is mighty expensive, so for
+ * now I just ignore the problem.
+ */
+
+ *flushed_everything = true;
+
+ if (!inner_only) {
+ /*
+ * Beautiful solution for the exclusive problems :)
+ */
+ if (is_cache_exclusive())
+ panic("%s can't handle exclusive CPU caches\n",
+ __func__);
+
+ if (length < inner_clean_breakpoint) {
+ /* Inner clean range */
+ dmac_map_area(vaddr, length, DMA_TO_DEVICE);
+ *flushed_everything = false;
+ } else {
+ clean_inner_dcache_all();
+ }
+
+ if (length < outer_flush_breakpoint) {
+ outer_cache.flush_range(paddr, paddr + length);
+ *flushed_everything = false;
+ } else {
+ outer_cache.flush_all();
+ }
+ }
+
+ if (length < inner_flush_breakpoint) {
+ /* Inner flush range */
+ dmac_flush_range(vaddr, (void *)((u32)vaddr + length));
+ *flushed_everything = false;
+ } else {
+ flush_inner_dcache_all();
+ }
+}
+
+bool speculative_data_prefetch(void)
+{
+ return true;
+}
+
+u32 get_dcache_granularity(void)
+{
+ return 32;
+}
+
+/*
+ * Local functions
+ */
+
+static void __clean_inner_dcache_all(void *param)
+{
+ __cpuc_clean_dcache_all();
+}
+
+static void clean_inner_dcache_all(void)
+{
+ on_each_cpu(__clean_inner_dcache_all, NULL, 1);
+}
+
+static void __flush_inner_dcache_all(void *param)
+{
+ __cpuc_flush_dcache_all();
+}
+
+static void flush_inner_dcache_all(void)
+{
+ on_each_cpu(__flush_inner_dcache_all, NULL, 1);
+}
+
+static bool is_cache_exclusive(void)
+{
+ static const u32 CA9_ACTLR_EXCL = 0x80;
+
+ u32 armv7_actlr;
+
+ asm (
+ "mrc p15, 0, %0, c1, c0, 1"
+ : "=r" (armv7_actlr)
+ );
+
+ if (armv7_actlr & CA9_ACTLR_EXCL)
+ return true;
+ else
+ return false;
+}
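
The four breakpoint constants above are just the rule from the comment applied to the quoted measurements: move the trade-off point roughly two thirds of the way from the best-case towards the worst-case time. If the timings are ever re-measured (which the comment warns may be needed after CPU, L2 or memory changes), the constants can be recomputed with a helper along these lines; the helper itself is only an illustration, the driver uses compile-time constants:

    /*
     * best/worst are the measured times (same arbitrary unit as the numbers
     * quoted in the comment) at which a range operation costs as much as a
     * complete clean/flush, for the empty-cache and full-cache case.
     */
    static u32 cache_breakpoint(u32 best, u32 worst)
    {
            /* bias ~2/3 of the way towards the worst case, without float math */
            return best + ((worst - best) * 666) / 1000;
    }

    /*
     * e.g. cache_breakpoint(21324, 32744) gives 28929, matching
     * inner_clean_breakpoint (quoted as roughly 28930 in the comment above).
     */
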
diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c
index c563e5418d8..1e179b9dad4 100644
--- a/arch/arm/mach-ux500/devices-common.c
+++ b/arch/arm/mach-ux500/devices-common.c
@@ -6,16 +6,25 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
-#include <plat/gpio-nomadik.h>
+#ifdef CONFIG_FB_MCDE
+#include <video/mcde_display.h>
+#include <video/mcde_display-av8100.h>
+#include <video/mcde_fb.h>
+#endif
#include <mach/hardware.h>
+#include <mach/pm.h>
#include "devices-common.h"
@@ -38,6 +47,7 @@ dbx500_add_amba_device(const char *name, resource_size_t base,
dev->dma_mask = DMA_BIT_MASK(32);
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.pm_domain = &ux500_amba_dev_power_domain;
dev->irq[0] = irq;
dev->irq[1] = NO_IRQ;
@@ -68,6 +78,7 @@ dbx500_add_platform_device(const char *name, int id, void *pdata,
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+ dev->dev.pm_domain = &ux500_dev_power_domain;
ret = platform_device_add_resources(dev, res, resnum);
if (ret)
@@ -108,6 +119,22 @@ dbx500_add_platform_device_4k1irq(const char *name, int id,
ARRAY_SIZE(resources));
}
+struct platform_device *
+dbx500_add_platform_device_noirq(const char *name, int id,
+ resource_size_t base, void *pdata)
+{
+ struct resource resources[] = {
+ [0] = {
+ .start = base,
+ .end = base + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+ };
+
+ return dbx500_add_platform_device(name, id, pdata, resources,
+ ARRAY_SIZE(resources));
+}
+
static struct platform_device *
dbx500_add_gpio(int id, resource_size_t addr, int irq,
struct nmk_gpio_platform_data *pdata)
@@ -140,7 +167,70 @@ void dbx500_add_gpios(resource_size_t *base, int num, int irq,
pdata->first_gpio = first;
pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first);
pdata->num_gpio = 32;
-
dbx500_add_gpio(i, base[i], irq, pdata);
}
}
+
+#ifdef CONFIG_FB_MCDE
+void hdmi_fb_onoff(struct mcde_display_device *ddev,
+ bool enable, u8 cea, u8 vesa_cea_nr)
+{
+ struct fb_info *fbi;
+ u16 w, h;
+ u16 vw, vh;
+ u32 rotate = FB_ROTATE_UR;
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ dev_dbg(&ddev->dev, "en:%d cea:%d nr:%d\n", enable, cea, vesa_cea_nr);
+
+ if (enable) {
+ if (ddev->enabled) {
+ dev_dbg(&ddev->dev, "Display is already enabled.\n");
+ return;
+ }
+
+ /* Create fb */
+ if (ddev->fbi == NULL) {
+ /* Note: change when dynamic buffering is available */
+ int buffering = 2;
+
+ /* Get default values */
+ mcde_dss_get_native_resolution(ddev, &w, &h);
+ vw = w;
+ vh = h * buffering;
+
+ if (vesa_cea_nr != 0)
+ ddev->ceanr_convert(ddev, cea, vesa_cea_nr,
+ buffering, &w, &h, &vw, &vh);
+
+ fbi = mcde_fb_create(ddev, w, h, vw, vh,
+ ddev->default_pixel_format, rotate);
+
+ if (IS_ERR(fbi)) {
+ dev_warn(&ddev->dev,
+ "Failed to create fb for display %s\n",
+ ddev->name);
+ goto hdmi_fb_onoff_end;
+ } else {
+ dev_info(&ddev->dev,
+ "Framebuffer created (%s)\n",
+ ddev->name);
+ }
+ driver_data->fbdevname = (char *)dev_name(fbi->dev);
+ }
+ } else {
+ if (!ddev->enabled) {
+ dev_dbg(&ddev->dev, "Display %s is already disabled.\n",
+ ddev->name);
+ return;
+ }
+ mcde_fb_destroy(ddev);
+ }
+
+hdmi_fb_onoff_end:
+ return;
+}
+EXPORT_SYMBOL(hdmi_fb_onoff);
+#endif
+
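
hdmi_fb_onoff() is exported for the HDMI/AV8100 display path: the caller passes the display device together with the negotiated CEA/VESA mode numbers, and the function creates or destroys the framebuffer accordingly. A hypothetical call site, just to show the intended direction of the parameters (the handler name and the way the mode numbers are obtained are assumptions, not part of this patch):

    static void example_hdmi_hotplug(struct mcde_display_device *ddev,
                                     bool connected, u8 cea, u8 vesa_cea_nr)
    {
            if (connected)
                    /* size the framebuffer for the newly negotiated mode */
                    hdmi_fb_onoff(ddev, true, cea, vesa_cea_nr);
            else
                    /* tear the framebuffer down when the cable is pulled */
                    hdmi_fb_onoff(ddev, false, 0, 0);
    }
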
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index 7825705033b..bbf551f6318 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -8,6 +8,8 @@
#ifndef __DEVICES_COMMON_H
#define __DEVICES_COMMON_H
+#include <linux/amba/serial.h>
+
extern struct amba_device *
dbx500_add_amba_device(const char *name, resource_size_t base,
int irq, void *pdata, unsigned int periphid);
@@ -17,18 +19,24 @@ dbx500_add_platform_device_4k1irq(const char *name, int id,
resource_size_t base,
int irq, void *pdata);
-struct spi_master_cntlr;
+extern struct platform_device *
+dbx500_add_platform_device_noirq(const char *name, int id,
+ resource_size_t base, void *pdata);
+
+struct stm_msp_controller;
static inline struct amba_device *
dbx500_add_msp_spi(const char *name, resource_size_t base, int irq,
- struct spi_master_cntlr *pdata)
+ struct stm_msp_controller *pdata)
{
return dbx500_add_amba_device(name, base, irq, pdata, 0);
}
+struct pl022_ssp_controller;
+
static inline struct amba_device *
dbx500_add_spi(const char *name, resource_size_t base, int irq,
- struct spi_master_cntlr *pdata,
+ struct pl022_ssp_controller *pdata,
u32 periphid)
{
return dbx500_add_amba_device(name, base, irq, pdata, periphid);
@@ -79,6 +87,25 @@ dbx500_add_rtc(resource_size_t base, int irq)
return dbx500_add_amba_device("rtc-pl031", base, irq, NULL, 0);
}
+struct cryp_platform_data;
+
+static inline struct platform_device *
+dbx500_add_cryp1(int id, resource_size_t base, int irq,
+ struct cryp_platform_data *pdata)
+{
+ return dbx500_add_platform_device_4k1irq("cryp1", id, base, irq,
+ pdata);
+}
+
+struct hash_platform_data;
+
+static inline struct platform_device *
+dbx500_add_hash1(int id, resource_size_t base,
+ struct hash_platform_data *pdata)
+{
+ return dbx500_add_platform_device_noirq("hash1", id, base, pdata);
+}
+
struct nmk_gpio_platform_data;
void dbx500_add_gpios(resource_size_t *base, int num, int irq,
diff --git a/arch/arm/mach-ux500/devices-db5500.c b/arch/arm/mach-ux500/devices-db5500.c
new file mode 100644
index 00000000000..299e3f06479
--- /dev/null
+++ b/arch/arm/mach-ux500/devices-db5500.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ *
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * for the System Trace Module part.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+
+#ifdef CONFIG_FB_MCDE
+#include <video/mcde.h>
+#endif
+#include <mach/db5500-regs.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/pm.h>
+
+#define GPIO_DATA(_name, first, num) \
+ { \
+ .name = _name, \
+ .first_gpio = first, \
+ .first_irq = NOMADIK_GPIO_TO_IRQ(first), \
+ .num_gpio = num, \
+ .get_secondary_status = ux500_pm_gpio_read_wake_up_status, \
+ .set_ioforce = ux500_pm_prcmu_set_ioforce, \
+ }
+
+#define GPIO_RESOURCE(block) \
+ { \
+ .start = U5500_GPIOBANK##block##_BASE, \
+ .end = U5500_GPIOBANK##block##_BASE + 127, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_DB5500_GPIO##block, \
+ .end = IRQ_DB5500_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ { \
+ .start = IRQ_DB5500_PRCMU_GPIO##block, \
+ .end = IRQ_DB5500_PRCMU_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define GPIO_DEVICE(block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 3, \
+ .resource = &u5500_gpio_resources[block * 3], \
+ .dev = { \
+ .platform_data = &u5500_gpio_data[block], \
+ }, \
+ }
+
+static struct nmk_gpio_platform_data u5500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0, 32),
+ GPIO_DATA("GPIO-32-63", 32, 4), /* 36..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64, 19), /* 83..95 not routed to pin */
+ GPIO_DATA("GPIO-96-127", 96, 6), /* 102..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128, 21), /* 149..159 not routed to pin */
+ GPIO_DATA("GPIO-160-191", 160, 32),
+ GPIO_DATA("GPIO-192-223", 192, 32),
+ GPIO_DATA("GPIO-224-255", 224, 4), /* 228..255 not routed to pin */
+};
+
+static struct resource u5500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+};
+
+struct platform_device u5500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+};
+
+#define U5500_PWM_SIZE 0x20
+static struct resource u5500_pwm0_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource u5500_pwm1_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE + U5500_PWM_SIZE,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE * 2 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource u5500_pwm2_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE + U5500_PWM_SIZE * 2,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE * 3 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource u5500_pwm3_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE + U5500_PWM_SIZE * 3,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE * 4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device u5500_pwm0_device = {
+ .id = 0,
+ .name = "pwm",
+ .resource = u5500_pwm0_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm0_resource),
+};
+
+struct platform_device u5500_pwm1_device = {
+ .id = 1,
+ .name = "pwm",
+ .resource = u5500_pwm1_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm1_resource),
+};
+
+struct platform_device u5500_pwm2_device = {
+ .id = 2,
+ .name = "pwm",
+ .resource = u5500_pwm2_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm2_resource),
+};
+
+struct platform_device u5500_pwm3_device = {
+ .id = 3,
+ .name = "pwm",
+ .resource = u5500_pwm3_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm3_resource),
+};
+
+#ifdef CONFIG_FB_MCDE
+static struct resource mcde_resources[] = {
+ [0] = {
+ .name = MCDE_IO_AREA,
+ .start = U5500_MCDE_BASE,
+ .end = U5500_MCDE_BASE + U5500_MCDE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = MCDE_IO_AREA,
+ .start = U5500_DSI_LINK1_BASE,
+ .end = U5500_DSI_LINK1_BASE + U5500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .name = MCDE_IO_AREA,
+ .start = U5500_DSI_LINK2_BASE,
+ .end = U5500_DSI_LINK2_BASE + U5500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [3] = {
+ .name = MCDE_IRQ,
+ .start = IRQ_DB5500_DISP,
+ .end = IRQ_DB5500_DISP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int mcde_platform_enable_dsipll(void)
+{
+ return prcmu_enable_dsipll();
+}
+
+static int mcde_platform_disable_dsipll(void)
+{
+ return prcmu_disable_dsipll();
+}
+
+static int mcde_platform_set_display_clocks(void)
+{
+ return prcmu_set_display_clocks();
+}
+
+static struct mcde_platform_data mcde_pdata = {
+ .num_dsilinks = 2,
+ .syncmux = 0x01,
+ .num_channels = 2,
+ .num_overlays = 3,
+ .regulator_mcde_epod_id = "vsupply",
+ .regulator_esram_epod_id = "v-esram12",
+#ifdef CONFIG_MCDE_DISPLAY_DSI
+ .clock_dsi_id = "hdmi",
+ .clock_dsi_lp_id = "tv",
+#endif
+ .clock_mcde_id = "mcde",
+ .platform_set_clocks = mcde_platform_set_display_clocks,
+ .platform_enable_dsipll = mcde_platform_enable_dsipll,
+ .platform_disable_dsipll = mcde_platform_disable_dsipll,
+};
+
+struct platform_device u5500_mcde_device = {
+ .name = "mcde",
+ .id = -1,
+ .dev = {
+ .platform_data = &mcde_pdata,
+ },
+ .num_resources = ARRAY_SIZE(mcde_resources),
+ .resource = mcde_resources,
+};
+#endif
+
+static struct resource b2r2_resources[] = {
+ [0] = {
+ .start = U5500_B2R2_BASE,
+ .end = U5500_B2R2_BASE + ((4*1024)-1),
+ .name = "b2r2_base",
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "B2R2_IRQ",
+ .start = IRQ_DB5500_B2R2,
+ .end = IRQ_DB5500_B2R2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device u5500_b2r2_device = {
+ .name = "b2r2",
+ .id = 0,
+ .dev = {
+ .init_name = "b2r2_bus",
+ .coherent_dma_mask = ~0,
+ },
+ .num_resources = ARRAY_SIZE(b2r2_resources),
+ .resource = b2r2_resources,
+};
+
+static struct resource u5500_thsens_resources[] = {
+ [0] = {
+ .name = "IRQ_HOTMON_LOW",
+ .start = IRQ_DB5500_PRCMU_TEMP_SENSOR_LOW,
+ .end = IRQ_DB5500_PRCMU_TEMP_SENSOR_LOW,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .name = "IRQ_HOTMON_HIGH",
+ .start = IRQ_DB5500_PRCMU_TEMP_SENSOR_HIGH,
+ .end = IRQ_DB5500_PRCMU_TEMP_SENSOR_HIGH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device u5500_thsens_device = {
+ .name = "db5500_temp",
+ .resource = u5500_thsens_resources,
+ .num_resources = ARRAY_SIZE(u5500_thsens_resources),
+};
diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h
index 0c4bccd02b9..a6536edd2d1 100644
--- a/arch/arm/mach-ux500/devices-db5500.h
+++ b/arch/arm/mach-ux500/devices-db5500.h
@@ -17,6 +17,16 @@
#define db5500_add_i2c3(pdata) \
dbx500_add_i2c(3, U5500_I2C3_BASE, IRQ_DB5500_I2C3, pdata)
+struct db5500_keypad_platform_data;
+
+static inline struct platform_device *
+db5500_add_keypad(struct db5500_keypad_platform_data *pdata)
+{
+ return dbx500_add_platform_device_4k1irq("db5500-keypad", -1,
+ U5500_KEYPAD_BASE,
+ IRQ_DB5500_KBD, pdata);
+}
+
#define db5500_add_msp0_i2s(pdata) \
dbx500_add_msp_i2s(0, U5500_MSP0_BASE, IRQ_DB5500_MSP0, pdata)
#define db5500_add_msp1_i2s(pdata) \
@@ -37,21 +47,21 @@
#define db5500_add_usb(rx_cfg, tx_cfg) \
ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg)
-#define db5500_add_sdi0(pdata) \
+#define db5500_add_sdi0(pdata, pid) \
dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata, \
- 0x10480180)
-#define db5500_add_sdi1(pdata) \
+ pid)
+#define db5500_add_sdi1(pdata, pid) \
dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata, \
- 0x10480180)
-#define db5500_add_sdi2(pdata) \
- dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata \
- 0x10480180)
-#define db5500_add_sdi3(pdata) \
- dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata \
- 0x10480180)
-#define db5500_add_sdi4(pdata) \
- dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata \
- 0x10480180)
+ pid)
+#define db5500_add_sdi2(pdata, pid) \
+ dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata, \
+ pid)
+#define db5500_add_sdi3(pdata, pid) \
+ dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata, \
+ pid)
+#define db5500_add_sdi4(pdata, pid) \
+ dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata, \
+ pid)
/* This one has a bad peripheral ID in the U5500 silicon */
#define db5500_add_spi0(pdata) \
@@ -61,10 +71,10 @@
dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata, \
0x10080023)
#define db5500_add_spi2(pdata) \
- dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata \
+ dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata, \
0x10080023)
#define db5500_add_spi3(pdata) \
- dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata \
+ dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata, \
0x10080023)
#define db5500_add_uart0(plat) \
@@ -76,4 +86,9 @@
#define db5500_add_uart3(plat) \
dbx500_add_uart("uart3", U5500_UART3_BASE, IRQ_DB5500_UART3, plat)
+#define db5500_add_cryp1(pdata) \
+ dbx500_add_cryp1(-1, U5500_CRYP1_BASE, IRQ_DB5500_CRYP1, pdata)
+#define db5500_add_hash1(pdata) \
+ dbx500_add_hash1(-1, U5500_HASH1_BASE, pdata)
+
#endif
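
With this change the db5500_add_sdi*() macros take the AMBA peripheral ID from the caller instead of hard-coding 0x10480180, so the board file now chooses the ID together with the MMC platform data. A sketch of a call site; the platform-data symbol is a placeholder, and 0x10480180 is shown only because it is the value that used to be hard-coded here:

    /* sdi0_data is whatever mmci platform data the board defines */
    db5500_add_sdi0(&sdi0_data, 0x10480180);
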
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 73b17404b19..5ebf5b42621 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -10,171 +10,423 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
+#include <plat/pincfg.h>
#include <plat/ste_dma40.h>
+#include <mach/devices.h>
#include <mach/hardware.h>
#include <mach/setup.h>
+#include <mach/pm.h>
+#ifdef CONFIG_FB_MCDE
+#include <video/mcde.h>
+#endif
+#include <linux/mfd/dbx500-prcmu.h>
+#ifdef CONFIG_HSI
+#include <mach/hsi.h>
+#endif
+#include <mach/ste-dma40-db8500.h>
-#include "ste-dma40-db8500.h"
+#include "pins-db8500.h"
-static struct resource dma40_resources[] = {
+#define GPIO_DATA(_name, first, num) \
+ { \
+ .name = _name, \
+ .first_gpio = first, \
+ .first_irq = NOMADIK_GPIO_TO_IRQ(first), \
+ .num_gpio = num, \
+ .get_secondary_status = ux500_pm_gpio_read_wake_up_status, \
+ .set_ioforce = ux500_pm_prcmu_set_ioforce, \
+ .supports_sleepmode = true, \
+ }
+
+#define GPIO_RESOURCE(block) \
+ { \
+ .start = U8500_GPIOBANK##block##_BASE, \
+ .end = U8500_GPIOBANK##block##_BASE + 127, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_DB8500_GPIO##block, \
+ .end = IRQ_DB8500_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ { \
+ .start = IRQ_PRCMU_GPIO##block, \
+ .end = IRQ_PRCMU_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define GPIO_DEVICE(block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 3, \
+ .resource = &u8500_gpio_resources[block * 3], \
+ .dev = { \
+ .platform_data = &u8500_gpio_data[block], \
+ }, \
+ }
+
+static struct nmk_gpio_platform_data u8500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0, 32),
+ GPIO_DATA("GPIO-32-63", 32, 5), /* 37..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64, 32),
+ GPIO_DATA("GPIO-96-127", 96, 2), /* 98..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128, 32),
+ GPIO_DATA("GPIO-160-191", 160, 12), /* 172..191 not routed to pin */
+ GPIO_DATA("GPIO-192-223", 192, 32),
+ GPIO_DATA("GPIO-224-255", 224, 7), /* 231..255 not routed to pin */
+ GPIO_DATA("GPIO-256-288", 256, 12), /* 268..288 not routed to pin */
+};
+
+static struct resource u8500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+ GPIO_RESOURCE(8),
+};
+
+struct platform_device u8500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+ GPIO_DEVICE(8),
+};
+
+static struct resource u8500_shrm_resources[] = {
[0] = {
- .start = U8500_DMA_BASE,
- .end = U8500_DMA_BASE + SZ_4K - 1,
+ .start = U8500_SHRM_GOP_INTERRUPT_BASE,
+ .end = U8500_SHRM_GOP_INTERRUPT_BASE + ((4*4)-1),
+ .name = "shrm_gop_register_base",
.flags = IORESOURCE_MEM,
- .name = "base",
},
[1] = {
- .start = U8500_DMA_LCPA_BASE,
- .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
- .flags = IORESOURCE_MEM,
- .name = "lcpa",
+ .start = IRQ_CA_WAKE_REQ_V1,
+ .end = IRQ_CA_WAKE_REQ_V1,
+ .name = "ca_irq_wake_req",
+ .flags = IORESOURCE_IRQ,
},
[2] = {
- .start = IRQ_DB8500_DMA,
- .end = IRQ_DB8500_DMA,
+ .start = IRQ_AC_READ_NOTIFICATION_0_V1,
+ .end = IRQ_AC_READ_NOTIFICATION_0_V1,
+ .name = "ac_read_notification_0_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_AC_READ_NOTIFICATION_1_V1,
+ .end = IRQ_AC_READ_NOTIFICATION_1_V1,
+ .name = "ac_read_notification_1_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+ [4] = {
+ .start = IRQ_CA_MSG_PEND_NOTIFICATION_0_V1,
+ .end = IRQ_CA_MSG_PEND_NOTIFICATION_0_V1,
+ .name = "ca_msg_pending_notification_0_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+ [5] = {
+ .start = IRQ_CA_MSG_PEND_NOTIFICATION_1_V1,
+ .end = IRQ_CA_MSG_PEND_NOTIFICATION_1_V1,
+ .name = "ca_msg_pending_notification_1_irq",
.flags = IORESOURCE_IRQ,
}
};
-/* Default configuration for physcial memcpy */
-struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
- .mode = STEDMA40_MODE_PHYSICAL,
- .dir = STEDMA40_MEM_TO_MEM,
+struct platform_device u8500_shrm_device = {
+ .name = "u8500_shrm",
+ .id = 0,
+ .dev = {
+ .init_name = "shrm_bus",
+ .coherent_dma_mask = ~0,
+ },
+
+ .num_resources = ARRAY_SIZE(u8500_shrm_resources),
+ .resource = u8500_shrm_resources
+};
+
+#ifdef CONFIG_FB_MCDE
+static struct resource mcde_resources[] = {
+ [0] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_MCDE_BASE,
+ .end = U8500_MCDE_BASE + U8500_MCDE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_DSI_LINK1_BASE,
+ .end = U8500_DSI_LINK1_BASE + U8500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_DSI_LINK2_BASE,
+ .end = U8500_DSI_LINK2_BASE + U8500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [3] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_DSI_LINK3_BASE,
+ .end = U8500_DSI_LINK3_BASE + U8500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [4] = {
+ .name = MCDE_IRQ,
+ .start = IRQ_DB8500_DISP,
+ .end = IRQ_DB8500_DISP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int mcde_platform_enable_dsipll(void)
+{
+ return prcmu_enable_dsipll();
+}
+
+static int mcde_platform_disable_dsipll(void)
+{
+ return prcmu_disable_dsipll();
+}
+
+static int mcde_platform_set_display_clocks(void)
+{
+ return prcmu_set_display_clocks();
+}
+
+static struct mcde_platform_data mcde_u8500_pdata = {
+ .num_dsilinks = 3,
+ /*
+ * [0] = 3: 24 bits DPI: connect LSB Ch B to D[0:7]
+ * [3] = 4: 24 bits DPI: connect MID Ch B to D[24:31]
+ * [4] = 5: 24 bits DPI: connect MSB Ch B to D[32:39]
+ *
+ * [1] = 3: TV out : connect LSB Ch B to D[8:15]
+ */
+#define DONT_CARE 0
+ .outmux = { 3, 3, DONT_CARE, 4, 5 },
+#undef DONT_CARE
+ .syncmux = 0x00, /* DPI channel A and B on output pins A and B resp */
+ .num_channels = 4,
+ .num_overlays = 6,
+#ifdef CONFIG_MCDE_DISPLAY_DSI
+ .regulator_vana_id = "vdddsi1v2",
+#endif
+ .regulator_mcde_epod_id = "vsupply",
+ .regulator_esram_epod_id = "v-esram34",
+#ifdef CONFIG_MCDE_DISPLAY_DSI
+ .clock_dsi_id = "hdmi",
+ .clock_dsi_lp_id = "tv",
+#endif
+ .clock_dpi_id = "lcd",
+ .clock_mcde_id = "mcde",
+ .platform_set_clocks = mcde_platform_set_display_clocks,
+ .platform_enable_dsipll = mcde_platform_enable_dsipll,
+ .platform_disable_dsipll = mcde_platform_disable_dsipll,
+};
+
+struct platform_device u8500_mcde_device = {
+ .name = "mcde",
+ .id = -1,
+ .dev = {
+ .platform_data = &mcde_u8500_pdata,
+ },
+ .num_resources = ARRAY_SIZE(mcde_resources),
+ .resource = mcde_resources,
+};
+#endif /* CONFIG_FB_MCDE */
- .src_info.data_width = STEDMA40_BYTE_WIDTH,
- .src_info.psize = STEDMA40_PSIZE_PHY_1,
- .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+static struct resource b2r2_resources[] = {
+ [0] = {
+ .start = U8500_B2R2_BASE,
+ .end = U8500_B2R2_BASE + ((4*1024)-1),
+ .name = "b2r2_base",
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "B2R2_IRQ",
+ .start = IRQ_DB8500_B2R2,
+ .end = IRQ_DB8500_B2R2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
- .dst_info.data_width = STEDMA40_BYTE_WIDTH,
- .dst_info.psize = STEDMA40_PSIZE_PHY_1,
- .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+struct platform_device u8500_b2r2_device = {
+ .name = "b2r2",
+ .id = 0,
+ .dev = {
+ .init_name = "b2r2_bus",
+ .coherent_dma_mask = ~0,
+ },
+ .num_resources = ARRAY_SIZE(b2r2_resources),
+ .resource = b2r2_resources,
};
-/* Default configuration for logical memcpy */
-struct stedma40_chan_cfg dma40_memcpy_conf_log = {
- .dir = STEDMA40_MEM_TO_MEM,
- .src_info.data_width = STEDMA40_BYTE_WIDTH,
- .src_info.psize = STEDMA40_PSIZE_LOG_1,
- .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+/*
+ * WATCHDOG
+ */
- .dst_info.data_width = STEDMA40_BYTE_WIDTH,
- .dst_info.psize = STEDMA40_PSIZE_LOG_1,
- .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+struct platform_device u8500_wdt_device = {
+ .name = "u8500_wdt",
+ .id = -1,
};
+#ifdef CONFIG_HSI
/*
- * Mapping between destination event lines and physical device address.
- * The event line is tied to a device and therefore the address is constant.
- * When the address comes from a primecell it will be configured in runtime
- * and we set the address to -1 as a placeholder.
+ * HSI
*/
-static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
- /* MUSB - these will be runtime-reconfigured */
- [DB8500_DMA_DEV39_USB_OTG_OEP_8] = -1,
- [DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = -1,
- [DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = -1,
- [DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = -1,
- [DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = -1,
- [DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = -1,
- [DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = -1,
- [DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = -1,
- /* PrimeCells - run-time configured */
- [DB8500_DMA_DEV0_SPI0_TX] = -1,
- [DB8500_DMA_DEV1_SD_MMC0_TX] = -1,
- [DB8500_DMA_DEV2_SD_MMC1_TX] = -1,
- [DB8500_DMA_DEV3_SD_MMC2_TX] = -1,
- [DB8500_DMA_DEV8_SSP0_TX] = -1,
- [DB8500_DMA_DEV9_SSP1_TX] = -1,
- [DB8500_DMA_DEV11_UART2_TX] = -1,
- [DB8500_DMA_DEV12_UART1_TX] = -1,
- [DB8500_DMA_DEV13_UART0_TX] = -1,
- [DB8500_DMA_DEV28_SD_MM2_TX] = -1,
- [DB8500_DMA_DEV29_SD_MM0_TX] = -1,
- [DB8500_DMA_DEV32_SD_MM1_TX] = -1,
- [DB8500_DMA_DEV33_SPI2_TX] = -1,
- [DB8500_DMA_DEV35_SPI1_TX] = -1,
- [DB8500_DMA_DEV40_SPI3_TX] = -1,
- [DB8500_DMA_DEV41_SD_MM3_TX] = -1,
- [DB8500_DMA_DEV42_SD_MM4_TX] = -1,
- [DB8500_DMA_DEV43_SD_MM5_TX] = -1,
-};
-
-/* Mapping between source event lines and physical device address */
-static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
- /* MUSB - these will be runtime-reconfigured */
- [DB8500_DMA_DEV39_USB_OTG_IEP_8] = -1,
- [DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = -1,
- [DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = -1,
- [DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = -1,
- [DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = -1,
- [DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = -1,
- [DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = -1,
- [DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = -1,
- /* PrimeCells */
- [DB8500_DMA_DEV0_SPI0_RX] = -1,
- [DB8500_DMA_DEV1_SD_MMC0_RX] = -1,
- [DB8500_DMA_DEV2_SD_MMC1_RX] = -1,
- [DB8500_DMA_DEV3_SD_MMC2_RX] = -1,
- [DB8500_DMA_DEV8_SSP0_RX] = -1,
- [DB8500_DMA_DEV9_SSP1_RX] = -1,
- [DB8500_DMA_DEV11_UART2_RX] = -1,
- [DB8500_DMA_DEV12_UART1_RX] = -1,
- [DB8500_DMA_DEV13_UART0_RX] = -1,
- [DB8500_DMA_DEV28_SD_MM2_RX] = -1,
- [DB8500_DMA_DEV29_SD_MM0_RX] = -1,
- [DB8500_DMA_DEV32_SD_MM1_RX] = -1,
- [DB8500_DMA_DEV33_SPI2_RX] = -1,
- [DB8500_DMA_DEV35_SPI1_RX] = -1,
- [DB8500_DMA_DEV40_SPI3_RX] = -1,
- [DB8500_DMA_DEV41_SD_MM3_RX] = -1,
- [DB8500_DMA_DEV42_SD_MM4_RX] = -1,
- [DB8500_DMA_DEV43_SD_MM5_RX] = -1,
-};
-
-/* Reserved event lines for memcpy only */
-static int dma40_memcpy_event[] = {
- DB8500_DMA_MEMCPY_TX_0,
- DB8500_DMA_MEMCPY_TX_1,
- DB8500_DMA_MEMCPY_TX_2,
- DB8500_DMA_MEMCPY_TX_3,
- DB8500_DMA_MEMCPY_TX_4,
- DB8500_DMA_MEMCPY_TX_5,
-};
-
-static struct stedma40_platform_data dma40_plat_data = {
- .dev_len = DB8500_DMA_NR_DEV,
- .dev_rx = dma40_rx_map,
- .dev_tx = dma40_tx_map,
- .memcpy = dma40_memcpy_event,
- .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
- .memcpy_conf_phy = &dma40_memcpy_conf_phy,
- .memcpy_conf_log = &dma40_memcpy_conf_log,
- .disabled_channels = {-1},
-};
-
-struct platform_device u8500_dma40_device = {
- .dev = {
- .platform_data = &dma40_plat_data,
+#define HSIR_OVERRUN(num) { \
+ .start = IRQ_DB8500_HSIR_CH##num##_OVRRUN, \
+ .end = IRQ_DB8500_HSIR_CH##num##_OVRRUN, \
+ .flags = IORESOURCE_IRQ, \
+ .name = "hsi_rx_overrun_ch"#num \
+}
+
+#define STE_HSI_PORT0_TX_CHANNEL_CFG(n) { \
+ .dir = STEDMA40_MEM_TO_PERIPH, \
+ .high_priority = true, \
+ .mode = STEDMA40_MODE_LOGICAL, \
+ .mode_opt = STEDMA40_LCHAN_SRC_LOG_DST_LOG, \
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
+ .dst_dev_type = n,\
+ .src_info.big_endian = false,\
+ .src_info.data_width = STEDMA40_WORD_WIDTH,\
+ .dst_info.big_endian = false,\
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,\
+},
+
+#define STE_HSI_PORT0_RX_CHANNEL_CFG(n) { \
+ .dir = STEDMA40_PERIPH_TO_MEM, \
+ .high_priority = true, \
+ .mode = STEDMA40_MODE_LOGICAL, \
+ .mode_opt = STEDMA40_LCHAN_SRC_LOG_DST_LOG, \
+ .src_dev_type = n,\
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
+ .src_info.big_endian = false,\
+ .src_info.data_width = STEDMA40_WORD_WIDTH,\
+ .dst_info.big_endian = false,\
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,\
+},
+
+static struct resource u8500_hsi_resources[] = {
+ {
+ .start = U8500_HSIR_BASE,
+ .end = U8500_HSIR_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "hsi_rx_base"
+ },
+ {
+ .start = U8500_HSIT_BASE,
+ .end = U8500_HSIT_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "hsi_tx_base"
+ },
+ {
+ .start = IRQ_DB8500_HSIRD0,
+ .end = IRQ_DB8500_HSIRD0,
+ .flags = IORESOURCE_IRQ,
+ .name = "hsi_rx_irq0"
+ },
+ {
+ .start = IRQ_DB8500_HSITD0,
+ .end = IRQ_DB8500_HSITD0,
+ .flags = IORESOURCE_IRQ,
+ .name = "hsi_tx_irq0"
+ },
+ {
+ .start = IRQ_DB8500_HSIR_EXCEP,
+ .end = IRQ_DB8500_HSIR_EXCEP,
+ .flags = IORESOURCE_IRQ,
+ .name = "hsi_rx_excep0"
+ },
+ HSIR_OVERRUN(0),
+ HSIR_OVERRUN(1),
+ HSIR_OVERRUN(2),
+ HSIR_OVERRUN(3),
+ HSIR_OVERRUN(4),
+ HSIR_OVERRUN(5),
+ HSIR_OVERRUN(6),
+ HSIR_OVERRUN(7),
+};
+
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg ste_hsi_port0_dma_tx_cfg[] = {
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV20_SLIM0_CH0_TX_HSI_TX_CH0)
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV21_SLIM0_CH1_TX_HSI_TX_CH1)
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV22_SLIM0_CH2_TX_HSI_TX_CH2)
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV23_SLIM0_CH3_TX_HSI_TX_CH3)
+};
+
+static struct stedma40_chan_cfg ste_hsi_port0_dma_rx_cfg[] = {
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV20_SLIM0_CH0_RX_HSI_RX_CH0)
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV21_SLIM0_CH1_RX_HSI_RX_CH1)
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV22_SLIM0_CH2_RX_HSI_RX_CH2)
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV23_SLIM0_CH3_RX_HSI_RX_CH3)
+};
+#endif
+
+static struct ste_hsi_port_cfg ste_hsi_port0_cfg = {
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_tx_cfg = ste_hsi_port0_dma_tx_cfg,
+ .dma_rx_cfg = ste_hsi_port0_dma_rx_cfg
+#endif
+};
+
+struct ste_hsi_platform_data u8500_hsi_platform_data = {
+ .num_ports = 1,
+ .use_dma = 1,
+ .port_cfg = &ste_hsi_port0_cfg,
+};
+
+struct platform_device u8500_hsi_device = {
+ .dev = {
+ .platform_data = &u8500_hsi_platform_data,
+ },
+ .name = "ste_hsi",
+ .id = 0,
+ .resource = u8500_hsi_resources,
+ .num_resources = ARRAY_SIZE(u8500_hsi_resources)
+};
+#endif /* CONFIG_HSI */
+
+/*
+ * Thermal Sensor
+ */
+
+static struct resource u8500_thsens_resources[] = {
+ {
+ .name = "IRQ_HOTMON_LOW",
+ .start = IRQ_PRCMU_HOTMON_LOW,
+ .end = IRQ_PRCMU_HOTMON_LOW,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IRQ_HOTMON_HIGH",
+ .start = IRQ_PRCMU_HOTMON_HIGH,
+ .end = IRQ_PRCMU_HOTMON_HIGH,
+ .flags = IORESOURCE_IRQ,
},
- .name = "dma40",
- .id = 0,
- .num_resources = ARRAY_SIZE(dma40_resources),
- .resource = dma40_resources
};
-void dma40_u8500ed_fixup(void)
-{
- dma40_plat_data.memcpy = NULL;
- dma40_plat_data.memcpy_len = 0;
- dma40_resources[0].start = U8500_DMA_BASE_ED;
- dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
- dma40_resources[1].start = U8500_DMA_LCPA_BASE_ED;
- dma40_resources[1].end = U8500_DMA_LCPA_BASE_ED + 2 * SZ_1K - 1;
-}
+struct platform_device u8500_thsens_device = {
+ .name = "dbx500_temp",
+ .resource = u8500_thsens_resources,
+ .num_resources = ARRAY_SIZE(u8500_thsens_resources),
+};
struct resource keypad_resources[] = {
[0] = {
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index cbd4a9ae810..4a54a6f5aa6 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -98,4 +98,9 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
#define db8500_add_uart2(pdata) \
dbx500_add_uart("uart2", U8500_UART2_BASE, IRQ_DB8500_UART2, pdata)
+#define db8500_add_cryp1(pdata) \
+ dbx500_add_cryp1(-1, U8500_CRYP1_BASE, IRQ_DB8500_CRYP1, pdata)
+#define db8500_add_hash1(pdata) \
+ dbx500_add_hash1(-1, U8500_HASH1_BASE, pdata)
+
#endif
diff --git a/arch/arm/mach-ux500/devices.c b/arch/arm/mach-ux500/devices.c
index ea0a2f92ca7..151be4f3e68 100644
--- a/arch/arm/mach-ux500/devices.c
+++ b/arch/arm/mach-ux500/devices.c
@@ -14,6 +14,51 @@
#include <mach/hardware.h>
#include <mach/setup.h>
+#ifdef CONFIG_STE_TRACE_MODEM
+#include <linux/db8500-modem-trace.h>
+#endif
+
+#ifdef CONFIG_STE_TRACE_MODEM
+static struct resource trace_resource = {
+ .start = 0,
+ .end = 0,
+ .name = "db8500-trace-area",
+ .flags = IORESOURCE_MEM
+};
+
+static struct db8500_trace_platform_data trace_pdata = {
+ .ape_base = U8500_APE_BASE,
+ .modem_base = U8500_MODEM_BASE,
+};
+
+struct platform_device u8500_trace_modem = {
+ .name = "db8500-modem-trace",
+ .id = 0,
+ .dev = {
+ .init_name = "db8500-modem-trace",
+ .platform_data = &trace_pdata,
+ },
+ .num_resources = 1,
+ .resource = &trace_resource,
+};
+
+static int __init early_trace_modem(char *p)
+{
+ struct resource *data = &trace_resource;
+ u32 size = memparse(p, &p);
+ if (*p == '@')
+ data->start = memparse(p + 1, &p);
+ data->end = data->start + size - 1;
+ return 0;
+}
+
+early_param("mem_mtrace", early_trace_modem);
+#endif
+
+struct platform_device ux500_hwmem_device = {
+ .name = "hwmem",
+};
+
void __init amba_add_devices(struct amba_device *devs[], int num)
{
int i;
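
early_trace_modem() above parses its argument with memparse(), so the boot parameter takes the usual <size>[KMG][@<start>] form; a bare size is accepted, and the start address is only set when an '@' follows. The address in the example below is purely illustrative:

    mem_mtrace=1M@0x06000000
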
diff --git a/arch/arm/mach-ux500/dma-db5500.c b/arch/arm/mach-ux500/dma-db5500.c
index 1cfab68ae41..d71c489d022 100644
--- a/arch/arm/mach-ux500/dma-db5500.c
+++ b/arch/arm/mach-ux500/dma-db5500.c
@@ -14,8 +14,7 @@
#include <plat/ste_dma40.h>
#include <mach/setup.h>
#include <mach/hardware.h>
-
-#include "ste-dma40-db5500.h"
+#include <mach/ste-dma40-db5500.h>
static struct resource dma40_resources[] = {
[0] = {
@@ -72,28 +71,128 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
* now.
*/
static const dma_addr_t dma40_rx_map[DB5500_DMA_NR_DEV] = {
- [DB5500_DMA_DEV24_SDMMC0_RX] = -1,
- [DB5500_DMA_DEV38_USB_OTG_IEP_8] = -1,
- [DB5500_DMA_DEV23_USB_OTG_IEP_7_15] = -1,
- [DB5500_DMA_DEV22_USB_OTG_IEP_6_14] = -1,
- [DB5500_DMA_DEV21_USB_OTG_IEP_5_13] = -1,
- [DB5500_DMA_DEV20_USB_OTG_IEP_4_12] = -1,
- [DB5500_DMA_DEV6_USB_OTG_IEP_3_11] = -1,
- [DB5500_DMA_DEV5_USB_OTG_IEP_2_10] = -1,
- [DB5500_DMA_DEV4_USB_OTG_IEP_1_9] = -1,
+ [DB5500_DMA_DEV0_SPI0_RX] = 0,
+ [DB5500_DMA_DEV1_SPI1_RX] = 0,
+ [DB5500_DMA_DEV2_SPI2_RX] = 0,
+ [DB5500_DMA_DEV3_SPI3_RX] = 0,
+ [DB5500_DMA_DEV4_USB_OTG_IEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV5_USB_OTG_IEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV6_USB_OTG_IEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV7_IRDA_RFS] = 0,
+ [DB5500_DMA_DEV8_IRDA_FIFO_RX] = 0,
+ [DB5500_DMA_DEV9_MSP0_RX] = U5500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV10_MSP1_RX] = U5500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV11_MSP2_RX] = U5500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV12_UART0_RX] = 0,
+ [DB5500_DMA_DEV13_UART1_RX] = 0,
+ [DB5500_DMA_DEV14_UART2_RX] = 0,
+ [DB5500_DMA_DEV15_UART3_RX] = 0,
+ [DB5500_DMA_DEV16_USB_OTG_IEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV17_USB_OTG_IEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV18_USB_OTG_IEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV19_USB_OTG_IEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV20_USB_OTG_IEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV21_USB_OTG_IEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV22_USB_OTG_IEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV23_USB_OTG_IEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV24_SDMMC0_RX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV25_SDMMC1_RX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV26_SDMMC2_RX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV27_SDMMC3_RX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV28_SDMMC4_RX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ /* 29, 30 not used */
+ [DB5500_DMA_DEV31_CRYPTO1_RX] = 0, /* v2 */
+ /* 32 not used */
+ [DB5500_DMA_DEV33_SDMMC0_RX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV34_SDMMC1_RX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV35_SDMMC2_RX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV36_SDMMC3_RX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV37_SDMMC4_RX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV38_USB_OTG_IEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV39_USB_OTG_IEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV40_USB_OTG_IEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV41_USB_OTG_IEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV42_USB_OTG_IEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV43_USB_OTG_IEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV44_USB_OTG_IEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV45_USB_OTG_IEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV46_CRYPTO1_RX] = 0, /* v2 */
+ [DB5500_DMA_DEV47_MCDE_RX] = 0,
+ [DB5500_DMA_DEV48_CRYPTO1_RX] = U5500_CRYP1_BASE + CRYP1_RX_REG_OFFSET,
+ [DB5500_DMA_DEV49_I2C1_RX] = 0,
+ [DB5500_DMA_DEV50_I2C3_RX] = 0,
+ [DB5500_DMA_DEV51_I2C2_RX] = 0,
+ /* 54 - 60 not used */
+ [DB5500_DMA_DEV61_CRYPTO0_RX] = 0,
+ /* 62, 63 not used */
};
/* Mapping between destination event lines and physical device address */
static const dma_addr_t dma40_tx_map[DB5500_DMA_NR_DEV] = {
- [DB5500_DMA_DEV24_SDMMC0_TX] = -1,
- [DB5500_DMA_DEV38_USB_OTG_OEP_8] = -1,
- [DB5500_DMA_DEV23_USB_OTG_OEP_7_15] = -1,
- [DB5500_DMA_DEV22_USB_OTG_OEP_6_14] = -1,
- [DB5500_DMA_DEV21_USB_OTG_OEP_5_13] = -1,
- [DB5500_DMA_DEV20_USB_OTG_OEP_4_12] = -1,
- [DB5500_DMA_DEV6_USB_OTG_OEP_3_11] = -1,
- [DB5500_DMA_DEV5_USB_OTG_OEP_2_10] = -1,
- [DB5500_DMA_DEV4_USB_OTG_OEP_1_9] = -1,
+ [DB5500_DMA_DEV0_SPI0_TX] = 0,
+ [DB5500_DMA_DEV1_SPI1_TX] = 0,
+ [DB5500_DMA_DEV2_SPI2_TX] = 0,
+ [DB5500_DMA_DEV3_SPI3_TX] = 0,
+ [DB5500_DMA_DEV4_USB_OTG_OEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV5_USB_OTG_OEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV6_USB_OTG_OEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV7_IRRC_TX] = 0,
+ [DB5500_DMA_DEV8_IRDA_FIFO_TX] = 0,
+ [DB5500_DMA_DEV9_MSP0_TX] = U5500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV10_MSP1_TX] = U5500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV11_MSP2_TX] = U5500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV12_UART0_TX] = 0,
+ [DB5500_DMA_DEV13_UART1_TX] = 0,
+ [DB5500_DMA_DEV14_UART2_TX] = 0,
+ [DB5500_DMA_DEV15_UART3_TX] = 0,
+ [DB5500_DMA_DEV16_USB_OTG_OEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV17_USB_OTG_OEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV18_USB_OTG_OEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV19_USB_OTG_OEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV20_USB_OTG_OEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV21_USB_OTG_OEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV22_USB_OTG_OEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV23_USB_OTG_OEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV24_SDMMC0_TX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV25_SDMMC1_TX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV26_SDMMC2_TX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV27_SDMMC3_TX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV28_SDMMC4_TX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ /* 29 not used */
+ [DB5500_DMA_DEV30_HASH1_TX] = 0, /* v2 */
+ [DB5500_DMA_DEV31_CRYPTO1_TX] = 0, /* v2 */
+ [DB5500_DMA_DEV32_FSMC_TX] = 0,
+ [DB5500_DMA_DEV33_SDMMC0_TX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV34_SDMMC1_TX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV35_SDMMC2_TX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV36_SDMMC3_TX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV37_SDMMC4_TX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV38_USB_OTG_OEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV39_USB_OTG_OEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV40_USB_OTG_OEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV41_USB_OTG_OEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV42_USB_OTG_OEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV43_USB_OTG_OEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV44_USB_OTG_OEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV45_USB_OTG_OEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV46_CRYPTO1_TX] = 0, /* v2 */
+ [DB5500_DMA_DEV47_STM_TX] = 0,
+ [DB5500_DMA_DEV48_CRYPTO1_TX] = U5500_CRYP1_BASE + CRYP1_TX_REG_OFFSET,
+ [DB5500_DMA_DEV49_CRYPTO1_TX_HASH1_TX] = 0,
+ [DB5500_DMA_DEV50_HASH1_TX] = U5500_HASH1_BASE + HASH1_TX_REG_OFFSET,
+ [DB5500_DMA_DEV51_I2C1_TX] = 0,
+ [DB5500_DMA_DEV52_I2C3_TX] = 0,
+ [DB5500_DMA_DEV53_I2C2_TX] = 0,
+ /* 54, 55 not used */
+ [DB5500_DMA_MEMCPY_TX_1] = 0,
+ [DB5500_DMA_MEMCPY_TX_2] = 0,
+ [DB5500_DMA_MEMCPY_TX_3] = 0,
+ [DB5500_DMA_MEMCPY_TX_4] = 0,
+ [DB5500_DMA_MEMCPY_TX_5] = 0,
+ [DB5500_DMA_DEV61_CRYPTO0_TX] = 0,
+ [DB5500_DMA_DEV62_CRYPTO0_TX_HASH0_TX] = 0,
+ [DB5500_DMA_DEV63_HASH0_TX] = 0,
};
static int dma40_memcpy_event[] = {
diff --git a/arch/arm/mach-ux500/dma-db8500.c b/arch/arm/mach-ux500/dma-db8500.c
new file mode 100644
index 00000000000..9976478fd80
--- /dev/null
+++ b/arch/arm/mach-ux500/dma-db8500.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ *
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <plat/ste_dma40.h>
+
+#ifdef CONFIG_HSI
+#include <mach/hsi.h>
+#endif
+#include <mach/setup.h>
+#include <mach/ste-dma40-db8500.h>
+#include <mach/pm.h>
+#include <mach/context.h>
+
+
+
+static struct resource dma40_resources[] = {
+ [0] = {
+ .start = U8500_DMA_BASE,
+ .end = U8500_DMA_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "base",
+ },
+ [1] = {
+ .start = U8500_DMA_LCPA_BASE,
+ .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "lcpa",
+ },
+ [2] = {
+ .start = IRQ_DB8500_DMA,
+ .end = IRQ_DB8500_DMA,
+ .flags = IORESOURCE_IRQ
+ },
+ [3] = {
+ .start = U8500_DMA_LCLA_BASE,
+ .end = U8500_DMA_LCLA_BASE + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "lcla_esram",
+ }
+};
+
+/* Default configuration for physical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+ .mode = STEDMA40_MODE_PHYSICAL,
+ .dir = STEDMA40_MEM_TO_MEM,
+
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.psize = STEDMA40_PSIZE_PHY_1,
+ .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.psize = STEDMA40_PSIZE_PHY_1,
+ .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+};
+
+/* Default configuration for logical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+ .dir = STEDMA40_MEM_TO_MEM,
+
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.psize = STEDMA40_PSIZE_LOG_1,
+ .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+ .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+};
+
+/*
+ * Mapping between source event lines and physical device addresses.
+ * This was created assuming that the event line is tied to a device and
+ * therefore the address is constant; however, this is not true for at least
+ * USB, and the values are just placeholders for USB. This table is preserved
+ * and used for now.
+ */
+static dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
+ [DB8500_DMA_DEV0_SPI0_RX] = 0,
+ [DB8500_DMA_DEV1_SD_MMC0_RX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV2_SD_MMC1_RX] = 0,
+ [DB8500_DMA_DEV3_SD_MMC2_RX] = 0,
+ [DB8500_DMA_DEV4_I2C1_RX] = 0,
+ [DB8500_DMA_DEV5_I2C3_RX] = 0,
+ [DB8500_DMA_DEV6_I2C2_RX] = 0,
+ [DB8500_DMA_DEV7_I2C4_RX] = 0,
+ [DB8500_DMA_DEV8_SSP0_RX] = U8500_SSP0_BASE + SSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV9_SSP1_RX] = 0,
+ [DB8500_DMA_DEV10_MCDE_RX] = 0,
+ [DB8500_DMA_DEV11_UART2_RX] = 0,
+ [DB8500_DMA_DEV12_UART1_RX] = 0,
+ [DB8500_DMA_DEV13_UART0_RX] = 0,
+ [DB8500_DMA_DEV14_MSP2_RX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV15_I2C0_RX] = 0,
+ [DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = U8500_USBOTG_BASE,
+#ifdef CONFIG_HSI
+ [DB8500_DMA_DEV20_SLIM0_CH0_RX_HSI_RX_CH0] = U8500_HSIR_BASE + 0x0 + STE_HSI_RX_BUFFERX,
+ [DB8500_DMA_DEV21_SLIM0_CH1_RX_HSI_RX_CH1] = U8500_HSIR_BASE + 0x4 + STE_HSI_RX_BUFFERX,
+ [DB8500_DMA_DEV22_SLIM0_CH2_RX_HSI_RX_CH2] = U8500_HSIR_BASE + 0x8 + STE_HSI_RX_BUFFERX,
+ [DB8500_DMA_DEV23_SLIM0_CH3_RX_HSI_RX_CH3] = U8500_HSIR_BASE + 0xC + STE_HSI_RX_BUFFERX,
+#endif
+ [DB8500_DMA_DEV24_SRC_SXA0_RX_TX] = 0,
+ [DB8500_DMA_DEV25_SRC_SXA1_RX_TX] = 0,
+ [DB8500_DMA_DEV26_SRC_SXA2_RX_TX] = 0,
+ [DB8500_DMA_DEV27_SRC_SXA3_RX_TX] = 0,
+ [DB8500_DMA_DEV28_SD_MM2_RX] = U8500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV29_SD_MM0_RX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV30_MSP3_RX] = U8500_MSP3_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV32_SD_MM1_RX] = U8500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV33_SPI2_RX] = 0,
+ [DB8500_DMA_DEV34_I2C3_RX2] = 0,
+ [DB8500_DMA_DEV35_SPI1_RX] = 0,
+ [DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV39_USB_OTG_IEP_8] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV40_SPI3_RX] = 0,
+ [DB8500_DMA_DEV41_SD_MM3_RX] = 0,
+ [DB8500_DMA_DEV42_SD_MM4_RX] = U8500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV43_SD_MM5_RX] = 0,
+ [DB8500_DMA_DEV44_SRC_SXA4_RX_TX] = 0,
+ [DB8500_DMA_DEV45_SRC_SXA5_RX_TX] = 0,
+ [DB8500_DMA_DEV46_SLIM0_CH8_RX_SRC_SXA6_RX_TX] = 0,
+ [DB8500_DMA_DEV47_SLIM0_CH9_RX_SRC_SXA7_RX_TX] = 0,
+ [DB8500_DMA_DEV48_CAC1_RX] = U8500_CRYP1_BASE + CRYP1_RX_REG_OFFSET,
+ /* 49, 50 and 51 are not used */
+ [DB8500_DMA_DEV52_SLIM0_CH4_RX_HSI_RX_CH4] = 0,
+ [DB8500_DMA_DEV53_SLIM0_CH5_RX_HSI_RX_CH5] = 0,
+ [DB8500_DMA_DEV54_SLIM0_CH6_RX_HSI_RX_CH6] = 0,
+ [DB8500_DMA_DEV55_SLIM0_CH7_RX_HSI_RX_CH7] = 0,
+ /* 56, 57, 58, 59 and 60 are not used */
+ [DB8500_DMA_DEV61_CAC0_RX] = 0,
+ /* 62 and 63 are not used */
+};
+
+/* Mapping between destination event lines and physical device address */
+static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
+ [DB8500_DMA_DEV0_SPI0_TX] = 0,
+ [DB8500_DMA_DEV1_SD_MMC0_TX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV2_SD_MMC1_TX] = 0,
+ [DB8500_DMA_DEV3_SD_MMC2_TX] = 0,
+ [DB8500_DMA_DEV4_I2C1_TX] = 0,
+ [DB8500_DMA_DEV5_I2C3_TX] = 0,
+ [DB8500_DMA_DEV6_I2C2_TX] = 0,
+ [DB8500_DMA_DEV7_I2C4_TX] = 0,
+ [DB8500_DMA_DEV8_SSP0_TX] = U8500_SSP0_BASE + SSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV9_SSP1_TX] = 0,
+ /* 10 is not used */
+ [DB8500_DMA_DEV11_UART2_TX] = 0,
+ [DB8500_DMA_DEV12_UART1_TX] = 0,
+ [DB8500_DMA_DEV13_UART0_TX] = 0,
+ [DB8500_DMA_DEV14_MSP2_TX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV15_I2C0_TX] = 0,
+ [DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = U8500_USBOTG_BASE,
+#ifdef CONFIG_HSI
+ [DB8500_DMA_DEV20_SLIM0_CH0_TX_HSI_TX_CH0] = U8500_HSIT_BASE + 0x0 + STE_HSI_TX_BUFFERX,
+ [DB8500_DMA_DEV21_SLIM0_CH1_TX_HSI_TX_CH1] = U8500_HSIT_BASE + 0x4 + STE_HSI_TX_BUFFERX,
+ [DB8500_DMA_DEV22_SLIM0_CH2_TX_HSI_TX_CH2] = U8500_HSIT_BASE + 0x8 + STE_HSI_TX_BUFFERX,
+ [DB8500_DMA_DEV23_SLIM0_CH3_TX_HSI_TX_CH3] = U8500_HSIT_BASE + 0xC + STE_HSI_TX_BUFFERX,
+#endif
+ [DB8500_DMA_DEV24_DST_SXA0_RX_TX] = 0,
+ [DB8500_DMA_DEV25_DST_SXA1_RX_TX] = 0,
+ [DB8500_DMA_DEV26_DST_SXA2_RX_TX] = 0,
+ [DB8500_DMA_DEV27_DST_SXA3_RX_TX] = 0,
+ [DB8500_DMA_DEV28_SD_MM2_TX] = U8500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV29_SD_MM0_TX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV30_MSP1_TX] = U8500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV32_SD_MM1_TX] = U8500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV33_SPI2_TX] = 0,
+ [DB8500_DMA_DEV34_I2C3_TX2] = 0,
+ [DB8500_DMA_DEV35_SPI1_TX] = 0,
+ [DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV39_USB_OTG_OEP_8] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV40_SPI3_TX] = 0,
+ [DB8500_DMA_DEV41_SD_MM3_TX] = 0,
+ [DB8500_DMA_DEV42_SD_MM4_TX] = U8500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV43_SD_MM5_TX] = 0,
+ [DB8500_DMA_DEV44_DST_SXA4_RX_TX] = 0,
+ [DB8500_DMA_DEV45_DST_SXA5_RX_TX] = 0,
+ [DB8500_DMA_DEV46_SLIM0_CH8_TX_DST_SXA6_RX_TX] = 0,
+ [DB8500_DMA_DEV47_SLIM0_CH9_TX_DST_SXA7_RX_TX] = 0,
+ [DB8500_DMA_DEV48_CAC1_TX] = U8500_CRYP1_BASE + CRYP1_TX_REG_OFFSET,
+ [DB8500_DMA_DEV49_CAC1_TX_HAC1_TX] = 0,
+ [DB8500_DMA_DEV50_HAC1_TX] = U8500_HASH1_BASE + HASH1_TX_REG_OFFSET,
+ [DB8500_DMA_MEMCPY_TX_0] = 0,
+ [DB8500_DMA_DEV52_SLIM1_CH4_TX_HSI_TX_CH4] = 0,
+ [DB8500_DMA_DEV53_SLIM1_CH5_TX_HSI_TX_CH5] = 0,
+ [DB8500_DMA_DEV54_SLIM1_CH6_TX_HSI_TX_CH6] = 0,
+ [DB8500_DMA_DEV55_SLIM1_CH7_TX_HSI_TX_CH7] = 0,
+ [DB8500_DMA_MEMCPY_TX_1] = 0,
+ [DB8500_DMA_MEMCPY_TX_2] = 0,
+ [DB8500_DMA_MEMCPY_TX_3] = 0,
+ [DB8500_DMA_MEMCPY_TX_4] = 0,
+ [DB8500_DMA_MEMCPY_TX_5] = 0,
+ [DB8500_DMA_DEV61_CAC0_TX] = 0,
+ [DB8500_DMA_DEV62_CAC0_TX_HAC0_TX] = 0,
+ [DB8500_DMA_DEV63_HAC0_TX] = 0,
+};
+
+/* Reserved event lines for memcpy only */
+static int dma40_memcpy_event[] = {
+ DB8500_DMA_MEMCPY_TX_0,
+ DB8500_DMA_MEMCPY_TX_1,
+ DB8500_DMA_MEMCPY_TX_2,
+ DB8500_DMA_MEMCPY_TX_3,
+ DB8500_DMA_MEMCPY_TX_4,
+ DB8500_DMA_MEMCPY_TX_5,
+};
+
+static struct stedma40_platform_data dma40_plat_data = {
+ .dev_len = ARRAY_SIZE(dma40_rx_map),
+ .dev_rx = dma40_rx_map,
+ .dev_tx = dma40_tx_map,
+ .memcpy = dma40_memcpy_event,
+ .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
+ .memcpy_conf_phy = &dma40_memcpy_conf_phy,
+ .memcpy_conf_log = &dma40_memcpy_conf_log,
+ /* Audio is using physical channel 2 from MMDSP */
+ .disabled_channels = {2, -1},
+ .use_esram_lcla = false,
+};
+
+#ifdef CONFIG_UX500_CONTEXT
+#define D40_DREG_GCC 0x000
+#define D40_DREG_LCPA 0x020
+#define D40_DREG_LCLA 0x024
+
+static void __iomem *base;
+
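+/*
+ * Save and restore the DMA40 controller base registers (GCC, LCPA, LCLA)
+ * around the APE context save/restore notifications.
+ */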
+static int dma_context_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ static unsigned long lcpa;
+ static unsigned long lcla;
+ static unsigned long gcc;
+
+ switch (event) {
+ case CONTEXT_APE_SAVE:
+ lcla = readl(base + D40_DREG_LCLA);
+ lcpa = readl(base + D40_DREG_LCPA);
+ gcc = readl(base + D40_DREG_GCC);
+ break;
+
+ case CONTEXT_APE_RESTORE:
+ writel(gcc, base + D40_DREG_GCC);
+ writel(lcpa, base + D40_DREG_LCPA);
+ writel(lcla, base + D40_DREG_LCLA);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dma_context_notifier = {
+ .notifier_call = dma_context_notifier_call,
+};
+
+static void dma_context_notifier_init(void)
+{
+ base = ioremap(dma40_resources[0].start, resource_size(&dma40_resources[0]));
+ if (WARN_ON(!base))
+ return;
+
+ WARN_ON(context_ape_notifier_register(&dma_context_notifier));
+}
+#else
+static void dma_context_notifier_init(void)
+{
+}
+#endif
+
+static struct platform_device dma40_device = {
+ .dev = {
+ .platform_data = &dma40_plat_data,
+#ifdef CONFIG_PM
+ .pm_domain = &ux500_dev_power_domain,
+#endif
+ },
+ .name = "dma40",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dma40_resources),
+ .resource = dma40_resources
+};
+
+void __init db8500_dma_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&dma40_device);
+ if (ret)
+ dev_err(&dma40_device.dev, "unable to register device: %d\n", ret);
+
+ dma_context_notifier_init();
+}
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c
index 572015e57cd..c007a70c8f9 100644
--- a/arch/arm/mach-ux500/hotplug.c
+++ b/arch/arm/mach-ux500/hotplug.c
@@ -11,19 +11,30 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/completion.h>
#include <asm/cacheflush.h>
+#include <mach/context.h>
+
extern volatile int pen_release;
+static DECLARE_COMPLETION(cpu_killed);
+
static inline void platform_do_lowpower(unsigned int cpu)
{
flush_cache_all();
- /* we put the platform to just WFI */
for (;;) {
- __asm__ __volatile__("dsb\n\t" "wfi\n\t"
- : : : "memory");
+
+ context_varm_save_core();
+ context_save_cpu_registers();
+
+ context_save_to_sram_and_wfi(false);
+
+ context_restore_cpu_registers();
+ context_varm_restore_core();
+
if (pen_release == cpu_logical_map(cpu)) {
/*
* OK, proper wakeup, we're done
@@ -35,7 +46,7 @@ static inline void platform_do_lowpower(unsigned int cpu)
int platform_cpu_kill(unsigned int cpu)
{
- return 1;
+ return wait_for_completion_timeout(&cpu_killed, 5000);
}
/*
@@ -45,6 +56,19 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
+#ifdef DEBUG
+ unsigned int this_cpu = hard_smp_processor_id();
+
+ if (cpu != this_cpu) {
+ printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
+ this_cpu, cpu);
+ BUG();
+ }
+#endif
+
+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ complete(&cpu_killed);
+
/* directly enter low power state, skipping secure registers */
platform_do_lowpower(cpu);
}
diff --git a/arch/arm/mach-ux500/hwmem-int.c b/arch/arm/mach-ux500/hwmem-int.c
new file mode 100644
index 00000000000..c23049df4a6
--- /dev/null
+++ b/arch/arm/mach-ux500/hwmem-int.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Hardware memory driver integration
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* CONA API */
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+struct hwmem_mem_type_struct *hwmem_mem_types;
+unsigned int hwmem_num_mem_types;
+
+static phys_addr_t hwmem_paddr;
+static size_t hwmem_size;
+
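+/*
+ * Parse the "hwmem=<size>@<paddr>" early kernel parameter,
+ * e.g. "hwmem=32M@0x1F000000" (illustrative values only).
+ */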
+static int __init parse_hwmem_param(char *p)
+{
+ hwmem_size = memparse(p, &p);
+
+ if (*p != '@')
+ goto no_at;
+
+ hwmem_paddr = memparse(p + 1, &p);
+
+ return 0;
+
+no_at:
+ hwmem_size = 0;
+
+ return -EINVAL;
+}
+early_param("hwmem", parse_hwmem_param);
+
+static int __init setup_hwmem(void)
+{
+ static const unsigned int NUM_MEM_TYPES = 2;
+
+ int ret;
+
+ if (hwmem_paddr != PAGE_ALIGN(hwmem_paddr) ||
+ hwmem_size != PAGE_ALIGN(hwmem_size) || hwmem_size == 0) {
+ printk(KERN_WARNING "HWMEM: hwmem_paddr !="
+ " PAGE_ALIGN(hwmem_paddr) || hwmem_size !="
+ " PAGE_ALIGN(hwmem_size) || hwmem_size == 0\n");
+ return -ENOMSG;
+ }
+
+ hwmem_mem_types = kzalloc(sizeof(struct hwmem_mem_type_struct) *
+ NUM_MEM_TYPES, GFP_KERNEL);
+ if (hwmem_mem_types == NULL)
+ return -ENOMEM;
+
+ hwmem_mem_types[0].id = HWMEM_MEM_SCATTERED_SYS;
+ hwmem_mem_types[0].allocator_api.alloc = cona_alloc;
+ hwmem_mem_types[0].allocator_api.free = cona_free;
+ hwmem_mem_types[0].allocator_api.get_alloc_paddr =
+ cona_get_alloc_paddr;
+ hwmem_mem_types[0].allocator_api.get_alloc_kaddr =
+ cona_get_alloc_kaddr;
+ hwmem_mem_types[0].allocator_api.get_alloc_size = cona_get_alloc_size;
+ hwmem_mem_types[0].allocator_instance = cona_create("hwmem",
+ hwmem_paddr, hwmem_size);
+ if (IS_ERR(hwmem_mem_types[0].allocator_instance)) {
+ ret = PTR_ERR(hwmem_mem_types[0].allocator_instance);
+ goto hwmem_ima_init_failed;
+ }
+
+ hwmem_mem_types[1] = hwmem_mem_types[0];
+ hwmem_mem_types[1].id = HWMEM_MEM_CONTIGUOUS_SYS;
+
+ hwmem_num_mem_types = NUM_MEM_TYPES;
+
+ return 0;
+
+hwmem_ima_init_failed:
+ kfree(hwmem_mem_types);
+
+ return ret;
+}
+arch_initcall_sync(setup_hwmem);
+
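+/*
+ * Resolve the caller's cache hint flags into the settings actually used for
+ * the allocation: uncached/bufferable requests become write-combined, and
+ * everything else defaults to inner and outer write-back cached.
+ */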
+enum hwmem_alloc_flags cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings)
+{
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+
+ enum hwmem_alloc_flags cache_settings;
+
+ if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) &&
+ requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE))
+ /*
+ * We never use uncached as it's extremely slow and there is
+ * no scenario where it would be better than buffered memory.
+ */
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+
+ /*
+ * The user has specified cached or nothing at all, both are treated as
+ * cached.
+ */
+ cache_settings = (requested_cache_settings &
+ ~(HWMEM_ALLOC_HINT_UNCACHED |
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY |
+ HWMEM_ALLOC_HINT_CACHE_NAOW)) |
+ HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+ if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_WT)))
+ cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB;
+ /*
+ * On ARMv7 "alloc on write" is just a hint so we need to assume the
+ * worst case ie "alloc on write". We would however like to remember
+ * the requested "alloc on write" setting so that we can pass it on to
+ * the hardware, we use the reserved bit in the alloc flags to do that.
+ */
+ if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ cache_settings |= HWMEM_ALLOC_RESERVED_CHI;
+ else
+ cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI;
+
+ return cache_settings;
+}
+
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+ pgprot_t *pgprot)
+{
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT)
+ *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK,
+ L_PTE_MT_WRITETHROUGH);
+ else {
+ if (cache_settings & HWMEM_ALLOC_RESERVED_CHI)
+ *pgprot = __pgprot_modify(*pgprot,
+ L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC);
+ else
+ *pgprot = __pgprot_modify(*pgprot,
+ L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
+ }
+ } else {
+ *pgprot = pgprot_writecombine(*pgprot);
+ }
+}
diff --git a/arch/arm/mach-ux500/hwreg.c b/arch/arm/mach-ux500/hwreg.c
new file mode 100644
index 00000000000..1a47e60ed46
--- /dev/null
+++ b/arch/arm/mach-ux500/hwreg.c
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ *
+ * Author: Etienne CARRIERE <etienne.carriere@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * HWREG: debug purpose module to map declared IOs and read/write
+ * access from debugfs entries.
+ *
+ * HWREG 32bit DB8500 v2.0 register access
+ * =======================================
+ *
+ * 32bit read:
+ * # echo <addr> > <debugfs>/mem/reg-addr
+ * # cat <debugfs>/mem/reg-val
+ *
+ * 32bit write:
+ * # echo <addr> > <debugfs>/mem/reg-addr
+ * # echo <value> > <debugfs>/mem/reg-val
+ *
+ * <addr> 0x-prefixed hexadecimal
+ * <value> decimal or 0x-prefixed hexadecimal
+ *
+ * HWREG DB8500 formatted read/write access
+ * =======================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] ADDRESS [VALUE]" > $debugfs/mem/hwreg
+ *
+ * CMD read read access
+ * write write access
+ *
+ * ADDRESS target reg physical addr (0x-hexa)
+ *
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hex (read) output in 0x-hexa (default)
+ * -l|-w|-b 32bit (default), 16bit or 8bit reg access
+ * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read:right, write:left)
+ * -o|-offset OFFSET address offset to add to ADDRESS value
+ *
+ * Warning: bit shift operation is applied to bit-mask.
+ * Warning: bit shift direction depends on read or write command.
+ *
+ * Examples:
+ *
+ * before: [*ADDRESS = 0xABCDEF98]
+ * # echo read -h -mask 0xFFF -shift 12 ADDRESS > hwreg
+ * # cat hwreg
+ * 0x00000CDE
+ * # echo write -h -mask 0xFFF -shift 12 ADDRESS 0x123 > hwreg
+ * # cat hwreg
+ * 0x00000123
+ * after [*ADDRESS = 0xAB123F98]
+ *
+ * before: [*ADDRESS = 0xABCDEF98]
+ * # echo read -h -mask 0x00F0F000 ADDRESS > hwreg
+ * # cat hwreg
+ * 0x00C0E000
+ * # echo write -h -mask 0x00F0F000 ADDRESS 0x12345678 > hwreg
+ * # cat hwreg
+ * 0x00305000
+ * after [*ADDRESS = 0xAB3D5F98]
+ *
+ * Read DB8500 version (full ID, chip ID, chip version ID):
+ *
+ * echo read 0x9001DBF4 > hwreg
+ * cat hwreg
+ * echo read -m 0xFFFF -s 8 0x9001DBF4 > hwreg
+ * cat hwreg
+ * echo read -m 0xFF -s 0 0x9001DBF4 > hwreg
+ * cat hwreg
+ *
+ * Read and Enable/Disable I2C PRCMU clock:
+ *
+ * printf "I2CCLK = " && echo read -m 1 -s 8 0x80157520 > hwreg
+ * cat /sys/kernel/debug/db8500/hwreg
+ * printf "I2CCLK off" && echo write -m 1 -s 8 0x80157518 1 > hwreg
+ * printf "I2CCLK on" && echo write -m 1 -s 8 0x80157510 1 > hwreg
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <mach/hardware.h>
+
+/*
+ * temporary definitions
+ * The following declarations are to be removed
+ * when kernel/arch/arm/mach-ux500/include/mach/db8500-regs.h is up-to-date
+ */
+
+/* DDR-SDRAM chip-select 0 (0x0000 0000 : 0x1FFF FFFF) */
+#ifndef U8500_SCU_CD_R4_BASE
+#define U8500_SCU_CD_R4_BASE 0x17c40000
+#endif
+
+#ifndef U8500_SCU_AD_R4_BASE
+#define U8500_SCU_AD_R4_BASE 0x17d40000
+#endif
+
+#ifndef U8500_HSI2CMODEMR4_BASE
+#define U8500_HSI2CMODEMR4_BASE 0x17e02000
+#endif
+/* End of temporary definitions */
+
+static struct dentry *hwreg_debugfs_dir;
+
+/* 32bit read/write resources */
+static u32 debug_address; /* shared: single read/write access */
+
+/* hwreg entry resources */
+struct hwreg_cfg {
+ uint addr; /* target physical addr to access */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+	int shift;	/* bit shift (read: right shift, write: left shift) */
+};
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1) /* bit 0: 0=hexa, 1=dec */
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c)) /* bit 0: 0=hexa, 1=dec */
+#define REG_FMT_32B(c) (((c)->fmt & 0x6)==0x0) /* bit[2:1]=0 => 32b access */
+#define REG_FMT_16B(c) (((c)->fmt & 0x6)==0x2) /* bit[2:1]=1 => 16b access */
+#define REG_FMT_8B(c) (((c)->fmt & 0x6)==0x4) /* bit[2:1]=2 => 8b access */
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
+/* HWREG guts: IO range descriptor */
+
+struct hwreg_io_range {
+ u32 base;
+ u32 size;
+ u8 *addr;
+};
+
+/*
+ * HWREG guts: mapping table
+ */
+static struct hwreg_io_range hwreg_io_map[] = {
+ /* Periph1 Peripherals */
+ {.base = U8500_PER1_BASE, .size = 0x10000},
+ /* Periph2 Peripherals */
+ {.base = U8500_PER2_BASE, .size = 0x10000},
+ /* Periph3 Peripherals */
+ {.base = U8500_PER3_BASE, .size = 0x10000},
+ /* Periph4 Peripherals */
+ {.base = U8500_PER4_BASE, .size = 0x70000},
+	/* Periph5 Peripherals */
+ {.base = U8500_PER5_BASE, .size = 0x20000},
+ /* Periph6 Peripherals */
+ {.base = U8500_PER6_BASE, .size = 0x10000},
+ /*
+ * Snoop Control Unit, A9 Private interrupt IF,
+ * A9 private peripherals, Level-2 Cache Configuration registers,
+ * and some reserved area
+ */
+ {.base = U8500_SCU_BASE, .size = 0x4000},
+
+ /* DISPLAY Ctrl. configuration registers */
+ {.base = U8500_MCDE_BASE, .size = SZ_4K},
+
+ /* DSI1 link registers */
+ {.base = U8500_DSI_LINK1_BASE, .size = SZ_4K},
+
+ /* DSI2 link registers */
+ {.base = U8500_DSI_LINK2_BASE, .size = SZ_4K},
+
+ /* DSI3 link registers */
+ {.base = U8500_DSI_LINK3_BASE, .size = SZ_4K},
+
+ /* DMA Ctrl. configuration registers (base address changed in V1) */
+ {.base = U8500_DMA_BASE, .size = SZ_4K},
+
+ /* 0xB7A00000 -> 0xB7E04000: Modem I2C */
+ {.base = U8500_MODEM_I2C, .size = 0x404000},
+
+ /* 0xA0390000 -> 0xA039FFFF: SBAG configuration registers */
+ {.base = U8500_SBAG_BASE, .size = SZ_4K},
+
+ /* 0xA0300000 -> 0xA031FFFF: SGA configuration registers */
+ {.base = U8500_SGA_BASE, .size = 0x10000},
+
+ /* 0xA0200000 -> 0xA02FFFFF: Smart Imaging Acc. Data Memory space (SIA) */
+ {.base = U8500_SIA_BASE, .size = 0x60000},
+
+ /* 0xA0100000 -> 0xA01FFFFF: Smart Video Acc. Data Memory space (SVA) */
+ {.base = U8500_SVA_BASE, .size = 0x60000},
+
+ /* 0x81000000 -> 0x8103FFFF: Main ICN Crossbar configuration registers */
+ {.base = U8500_ICN_BASE, .size = 0x2000},
+
+ /* 0x80140000 -> 0x8014FFFF: HSEM (Semaphores) configuration */
+ {.base = U8500_HSEM_BASE, .size = SZ_4K},
+
+ /* 0x80130000 -> 0x8013FFFF: B2R2 configuration registers */
+ {.base = U8500_B2R2_BASE, .size = SZ_4K},
+
+ /* 0x80100000 -> 0x8010FFFF: STM */
+ {.base = U8500_STM_BASE, .size = 0x10000},
+
+ /* High part of embedded boot ROM */
+ {.base = U8500_ASIC_ID_BASE, .size = SZ_4K},
+
+ /* 0x17C4 0000 : 0x17C4 007C */
+ {.base = U8500_SCU_CD_R4_BASE, .size = SZ_4K},
+
+ /* 0x17D4 0000 : 0x17D4 041C */
+ {.base = U8500_SCU_AD_R4_BASE, .size = SZ_4K},
+
+ /* 0x17E0 2000 : 0x17E0 2FFC */
+ {.base = U8500_HSI2CMODEMR4_BASE, .size = SZ_4K},
+
+ {.base = 0, .size = 0, },
+
+};
+
+static void hwreg_io_init(void)
+{
+ int i;
+
+ for (i = 0; hwreg_io_map[i].base; ++i) {
+ hwreg_io_map[i].addr = ioremap(hwreg_io_map[i].base,
+ hwreg_io_map[i].size);
+ if (!hwreg_io_map[i].addr)
+ printk(KERN_WARNING
+ "%s: ioremap for %d (%08x) failed\n",
+ __func__, i, hwreg_io_map[i].base);
+ }
+}
+
+static void hwreg_io_exit(void)
+{
+ int i;
+
+ for (i = 0; hwreg_io_map[i].base; ++i)
+ if (hwreg_io_map[i].addr)
+ iounmap(hwreg_io_map[i].addr);
+}
+
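+/*
+ * Translate a physical register address into its ioremapped virtual
+ * address, or NULL if the address is outside the declared IO ranges.
+ */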
+static void *hwreg_io_ptov(u32 phys)
+{
+ int i;
+
+ for (i = 0; hwreg_io_map[i].base; ++i) {
+ u32 base = hwreg_io_map[i].base;
+ u32 size = hwreg_io_map[i].size;
+ u8 *addr = hwreg_io_map[i].addr;
+
+ if (phys < base || phys >= base + size)
+ continue;
+
+ if (addr)
+ return addr + phys - base;
+
+ break;
+ }
+
+ return NULL;
+}
+
+
+/*
+ * HWREG 32bit DB8500 register read/write access debugfs part
+ */
+
+static int hwreg_address_print(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "0x%08X\n", debug_address);
+}
+
+static int hwreg_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_address_print, inode->i_private);
+}
+
+static ssize_t hwreg_address_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long user_address;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
+
+ if (err)
+ return err;
+
+ if (hwreg_io_ptov(user_address)==NULL)
+ return -EADDRNOTAVAIL;
+
+ debug_address = user_address;
+ return count;
+}
+
+static int hwreg_value_print(struct seq_file *s, void *p)
+{
+ void *ptr;
+
+ ptr = hwreg_io_ptov(debug_address);
+ if (ptr == NULL)
+ return -EADDRNOTAVAIL;
+ seq_printf(s, "0x%X\n", readl(ptr));
+ return 0;
+}
+
+static int hwreg_value_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_value_print, inode->i_private);
+}
+
+static ssize_t hwreg_value_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long user_val;
+ void *ptr;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
+
+ if (err)
+ return err;
+
+ if ((ptr = hwreg_io_ptov(debug_address)) == NULL)
+ return -EFAULT;
+ writel(user_val, ptr);
+ return count;
+}
+
+static const struct file_operations hwreg_address_fops = {
+ .open = hwreg_address_open,
+ .write = hwreg_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+static const struct file_operations hwreg_value_fops = {
+ .open = hwreg_value_open,
+ .write = hwreg_value_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/* 'map' read entry: display current HWREG IO mapping table */
+static int hwreg_map_print(struct seq_file *s, void *p)
+{
+ int err, i;
+ for (i = 0; hwreg_io_map[i].base; ++i) {
+ err = seq_printf(s, "%d: 0x%08X => 0x%08X\n",
+ i, hwreg_io_map[i].base,
+ hwreg_io_map[i].base+hwreg_io_map[i].size);
+ if (err < 0)
+ return -ENOMEM;
+ }
+ return 0;
+}
+static int hwreg_map_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_map_print, inode->i_private);
+}
+
+static const struct file_operations hwreg_map_fops = {
+ .open = hwreg_map_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * HWREG DB8500 formatted routines
+ */
+
+static int hwreg_print(struct seq_file *s, void *d)
+{
+ struct hwreg_cfg *c = (struct hwreg_cfg*) s->private;
+ void *p;
+ uint v;
+
+ if ((c==NULL) || ((p = hwreg_io_ptov(c->addr))==NULL))
+ return -EADDRNOTAVAIL;
+
+ v = (uint) (REG_FMT_32B(c) ? readl(p) : REG_FMT_16B(c) ? readw(p) : readb(p));
+ v = (c->shift>=0) ? v >> c->shift : v << (-c->shift);
+ v = v & c->mask;
+
+ if (REG_FMT_DEC(c))
+ seq_printf(s, "%d\n", v);
+ else if (REG_FMT_32B(c))
+ seq_printf(s, "0x%08X\n", v);
+	else if (REG_FMT_16B(c))
+ seq_printf(s, "0x%04X\n", v);
+ else
+ seq_printf(s, "0x%02X\n", v);
+ return 0;
+}
+
+static int hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_print, inode->i_private);
+}
+
+/*
+ * return length of an ASCII numerical value, 0 if the string is not a
+ * numerical value. The string shall start at the value's first char.
+ * The string can be tailed with \0, space or newline chars only.
+ * The value can be decimal or hexadecimal (prefixed 0x or 0X).
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if((*s=='0') && ((*(s+1)=='x') || (*(s+1)=='X'))) {
+ s += 2;
+ for (; *s && (*s!=' ') && (*s!='\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s=='-')
+ s++;
+ for (; *s && (*s!=' ') && (*s!='\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
+
+/*
+ * parse hwreg input data.
+ * update global hwreg_cfg only if input data syntax is ok.
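+ * Example input (illustrative address/value): "write -m 0xFF -s 8 0x80157520 0x1A"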
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg)
+{
+ uint write, val=0, offset=0;
+ struct hwreg_cfg loc = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if(!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else {
+ return -EINVAL;
+ }
+
+ /* OPTIONS -l|-w|-b -s -m -o */
+ while((*b==' ') || (*b=='-')) {
+ if (*(b-1)!=' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) || (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2)==' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) || (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2)==' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) || (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2)==' ') ? 3 : 6;
+ if (strval_len(b)==0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) || (!strncmp(b,"-shift ", 7))) {
+ b += (*(b+2)==' ') ? 3 : 7;
+ if (strval_len(b)==0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+
+ } else if ((!strncmp(b, "-o ", 3)) || (!strncmp(b,"-offset ", 8))) {
+ b += (*(b+2)==' ') ? 3 : 8;
+ if (strval_len(b)==0)
+ return -EINVAL;
+ offset = simple_strtol(b, &b, 0);
+ } else if (!strncmp(b, "-l ", 3)) {
+ b += 3;
+ loc.fmt = (loc.fmt & ~(3<<1)) | (0<<1);
+ } else if (!strncmp(b, "-w ", 3)) {
+ b += 3;
+ loc.fmt = (loc.fmt & ~(3<<1)) | (1<<1);
+ } else if (!strncmp(b, "-b ", 3)) {
+ b += 3;
+ loc.fmt = (loc.fmt & ~(3<<1)) | (2<<1);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg ADDRESS */
+ if (strval_len(b)==0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+ loc.addr += offset;
+ if (hwreg_io_ptov(loc.addr) == NULL)
+ return -EINVAL;
+
+ if (write) {
+ while(*b==' ') b++; /* skip spaces up to arg VALUE */
+ if (strval_len(b)==0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef DEBUG
+ printk(KERN_INFO "HWREG request: %s %d-bit reg, %s, addr=0x%08X, "
+ "mask=0x%X, shift=%d value=0x%X\n",
+ (write)?"write":"read",
+ REG_FMT_32B(cfg)?32:REG_FMT_16B(cfg)?16:8,
+ REG_FMT_DEC(cfg)?"decimal":"hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (write) {
+ void *p = hwreg_io_ptov(cfg->addr);
+ uint d = (uint) (REG_FMT_32B(cfg)) ? readl(p) :
+ (REG_FMT_16B(cfg)) ? readw(p) : readb(p);
+
+ if (cfg->shift>=0) {
+ d &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ d &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | d;
+
+ /* read reg, reset mask field and update value bit-field */
+ if (REG_FMT_32B(cfg))
+ writel(val, p);
+ else if (REG_FMT_16B(cfg))
+ writew(val, p);
+ else
+ writeb(val, p);
+ }
+ return 0;
+}
+
+static ssize_t hwreg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg);
+ return (ret) ? ret : buf_size;
+}
+
+static const struct file_operations hwreg_fops = {
+ .open = hwreg_open,
+ .write = hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * hwreg module init/cleanup
+ */
+static int __init hwreg_initialize(void)
+{
+ static struct dentry *file;
+ hwreg_io_init();
+
+ hwreg_debugfs_dir = debugfs_create_dir("mem", NULL);
+ if (!hwreg_debugfs_dir)
+ goto debugfs_err;
+
+ file = debugfs_create_file("reg-addr",
+ (S_IRUGO | S_IWUGO), hwreg_debugfs_dir,
+ NULL, &hwreg_address_fops);
+ if (!file)
+ goto debugfs_err;
+ file = debugfs_create_file("reg-val",
+ (S_IRUGO | S_IWUGO), hwreg_debugfs_dir,
+ NULL, &hwreg_value_fops);
+ if (!file)
+ goto debugfs_err;
+ file = debugfs_create_file("reg-map",
+ (S_IRUGO),
+ hwreg_debugfs_dir, NULL, &hwreg_map_fops);
+ if (!file)
+ goto debugfs_err;
+ file = debugfs_create_file("hwreg",
+ (S_IRUGO),
+ hwreg_debugfs_dir, &hwreg_cfg, &hwreg_fops);
+ if (!file)
+ goto debugfs_err;
+ return 0;
+
+debugfs_err:
+ if (hwreg_debugfs_dir)
+ debugfs_remove_recursive(hwreg_debugfs_dir);
+ printk(KERN_ERR "hwreg: failed to register debugfs entries.\n");
+ return -1;
+}
+
+static void __exit hwreg_finalize(void)
+{
+ debugfs_remove_recursive(hwreg_debugfs_dir);
+ hwreg_io_exit();
+}
+
+module_init(hwreg_initialize);
+module_exit(hwreg_finalize);
+
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_DESCRIPTION("DB8500 HW registers access through debugfs");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-ux500/include/mach/ab8500_gpadc.h b/arch/arm/mach-ux500/include/mach/ab8500_gpadc.h
new file mode 100644
index 00000000000..4289dcfc0aa
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/ab8500_gpadc.h
@@ -0,0 +1,36 @@
+/*
+ * ab8500_gpadc.h - AB8500 GPADC Driver
+ *
+ * Copyright (C) 2010 ST-Ericsson SA
+ * Licensed under GPLv2.
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#ifndef _AB8500_GPADC_H
+#define _AB8500_GPADC_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+/* GPADC source: From datasheet (ADCSwSel[4:0] in GPADCCtrl2) */
+#define BAT_CTRL 0x01
+#define ACC_DETECT1 0x04
+#define ACC_DETECT2 0x05
+#define MAIN_BAT_V 0x08
+#define BK_BAT_V 0x0C
+#define VBUS_V 0x09
+#define MAIN_CHARGER_V 0x03
+#define MAIN_CHARGER_C 0x0A
+#define USB_CHARGER_C 0x0B
+#define DIE_TEMP 0x0D
+#define BTEMP_BALL 0x02
+
+struct ab8500_gpadc_device_info {
+ struct completion ab8500_gpadc_complete;
+ struct mutex ab8500_gpadc_lock;
+#if defined(CONFIG_REGULATOR)
+ struct regulator *regu;
+#endif
+};
+
+int ab8500_gpadc_conversion(int input);
+
+#endif /* _AB8500_GPADC_H */
diff --git a/arch/arm/mach-ux500/include/mach/abx500-accdet.h b/arch/arm/mach-ux500/include/mach/abx500-accdet.h
new file mode 100644
index 00000000000..03c627f6011
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/abx500-accdet.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright ST-Ericsson 2011.
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com> for ST Ericsson.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _ABx500_ACCDET_H
+#define _ABx500_ACCDET_H
+
+/*
+* Debounce times for AccDet1 input
+* @0x880 [2:0]
+*/
+#define ACCDET1_DB_0ms 0x00
+#define ACCDET1_DB_10ms 0x01
+#define ACCDET1_DB_20ms 0x02
+#define ACCDET1_DB_30ms 0x03
+#define ACCDET1_DB_40ms 0x04
+#define ACCDET1_DB_50ms 0x05
+#define ACCDET1_DB_60ms 0x06
+#define ACCDET1_DB_70ms 0x07
+
+/*
+* Voltage threshold for AccDet1 input
+* @0x880 [6:3]
+*/
+#define ACCDET1_TH_1100mV 0x40
+#define ACCDET1_TH_1200mV 0x48
+#define ACCDET1_TH_1300mV 0x50
+#define ACCDET1_TH_1400mV 0x58
+#define ACCDET1_TH_1500mV 0x60
+#define ACCDET1_TH_1600mV 0x68
+#define ACCDET1_TH_1700mV 0x70
+#define ACCDET1_TH_1800mV 0x78
+
+/*
+* Voltage threshold for AccDet21 input
+* @0x881 [3:0]
+*/
+#define ACCDET21_TH_300mV 0x00
+#define ACCDET21_TH_400mV 0x01
+#define ACCDET21_TH_500mV 0x02
+#define ACCDET21_TH_600mV 0x03
+#define ACCDET21_TH_700mV 0x04
+#define ACCDET21_TH_800mV 0x05
+#define ACCDET21_TH_900mV 0x06
+#define ACCDET21_TH_1000mV 0x07
+#define ACCDET21_TH_1100mV 0x08
+#define ACCDET21_TH_1200mV 0x09
+#define ACCDET21_TH_1300mV 0x0a
+#define ACCDET21_TH_1400mV 0x0b
+#define ACCDET21_TH_1500mV 0x0c
+#define ACCDET21_TH_1600mV 0x0d
+#define ACCDET21_TH_1700mV 0x0e
+#define ACCDET21_TH_1800mV 0x0f
+
+/*
+* Voltage threshold for AccDet22 input
+* @0x881 [7:4]
+*/
+#define ACCDET22_TH_300mV 0x00
+#define ACCDET22_TH_400mV 0x10
+#define ACCDET22_TH_500mV 0x20
+#define ACCDET22_TH_600mV 0x30
+#define ACCDET22_TH_700mV 0x40
+#define ACCDET22_TH_800mV 0x50
+#define ACCDET22_TH_900mV 0x60
+#define ACCDET22_TH_1000mV 0x70
+#define ACCDET22_TH_1100mV 0x80
+#define ACCDET22_TH_1200mV 0x90
+#define ACCDET22_TH_1300mV 0xa0
+#define ACCDET22_TH_1400mV 0xb0
+#define ACCDET22_TH_1500mV 0xc0
+#define ACCDET22_TH_1600mV 0xd0
+#define ACCDET22_TH_1700mV 0xe0
+#define ACCDET22_TH_1800mV 0xf0
+
+/*
+* Voltage threshold for AccDet1 input
+* @0x880 [6:3]
+*/
+#define ACCDET1_TH_300mV 0x00
+#define ACCDET1_TH_400mV 0x01
+#define ACCDET1_TH_500mV 0x02
+#define ACCDET1_TH_600mV 0x03
+#define ACCDET1_TH_700mV 0x04
+#define ACCDET1_TH_800mV 0x05
+#define ACCDET1_TH_900mV 0x06
+#define ACCDET1_TH_1000mV 0x07
+
+#define MAX_DET_COUNT 10
+#define MAX_VOLT_DIFF 30
+#define MIN_MIC_POWER -100
+
+/**
+ * struct abx500_accdet_platform_data - AV Accessory detection specific
+ * platform data
+ * @btn_keycode Keycode to be sent when accessory button is pressed.
+ * @accdet1_dbth Debounce time + voltage threshold for accdet 1 input.
+ * @accdet2122_th Voltage thresholds for accdet21 and accdet22 inputs.
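+ * @video_ctrl_gpio GPIO used for the video control (AV switch) signal.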
+ * @is_detection_inverted Whether the accessory insert/removal, button
+ * press/release irq's are inverted.
+ */
+struct abx500_accdet_platform_data {
+ int btn_keycode;
+ u8 accdet1_dbth;
+ u8 accdet2122_th;
+ unsigned int video_ctrl_gpio;
+ bool is_detection_inverted;
+};
+
+/* Enumerations */
+
+/**
+ * @JACK_TYPE_UNSPECIFIED Not known whether any accessories are connected.
+ * @JACK_TYPE_DISCONNECTED No accessories connected.
+ * @JACK_TYPE_CONNECTED Accessory is connected but functionality was unable to
+ * detect the actual type. In this mode, possible button events are reported.
+ * @JACK_TYPE_HEADPHONE Headphone type of accessory (spkrs only) connected
+ * @JACK_TYPE_HEADSET Headset type of accessory (mic+spkrs) connected
+ * @JACK_TYPE_CARKIT Carkit type of accessory connected
+ * @JACK_TYPE_OPENCABLE Open cable connected
+ * @JACK_TYPE_CVIDEO CVideo type of accessory connected.
+ */
+enum accessory_jack_type {
+ JACK_TYPE_UNSPECIFIED,
+ JACK_TYPE_DISCONNECTED,
+ JACK_TYPE_CONNECTED,
+ JACK_TYPE_HEADPHONE,
+ JACK_TYPE_HEADSET,
+ JACK_TYPE_CARKIT,
+ JACK_TYPE_OPENCABLE,
+ JACK_TYPE_CVIDEO
+};
+
+/**
+ * @BUTTON_UNK Button state not known
+ * @BUTTON_PRESSED Button "down"
+ * @BUTTON_RELEASED Button "up"
+ */
+enum accessory_button_state {
+ BUTTON_UNK,
+ BUTTON_PRESSED,
+ BUTTON_RELEASED
+};
+
+/**
+ * @PLUG_IRQ Interrupt gen. when accessory is plugged in
+ * @UNPLUG_IRQ Interrupt gen. when accessory is unplugged
+ * @BUTTON_PRESS_IRQ Interrupt gen. when accessory button pressed.
+ * @BUTTON_RELEASE_IRQ Interrupt gen. when accessory button released.
+ */
+enum accessory_irq {
+ PLUG_IRQ,
+ UNPLUG_IRQ,
+ BUTTON_PRESS_IRQ,
+ BUTTON_RELEASE_IRQ,
+};
+
+/**
+ * Enumerates the op. modes of the avcontrol switch
+ * @AUDIO_IN Audio input is selected
+ * @VIDEO_OUT Video output is selected
+ * @NOT_SET The av-switch control signal is disconnected.
+ */
+enum accessory_avcontrol_dir {
+ AUDIO_IN,
+ VIDEO_OUT,
+ NOT_SET,
+};
+
+/**
+ * @REGULATOR_VAUDIO v-audio regulator
+ * @REGULATOR_VAMIC1 v-amic1 regulator
+ * @REGULATOR_AVSWITCH Audio/Video select switch regulator
+ * @REGULATOR_ALL All regulators combined
+ */
+enum accessory_regulator {
+ REGULATOR_NONE = 0x0,
+ REGULATOR_VAUDIO = 0x1,
+ REGULATOR_VAMIC1 = 0x2,
+ REGULATOR_AVSWITCH = 0x4,
+ REGULATOR_ALL = 0xFF
+};
+
+/* Structures */
+
+/**
+ * Describes an interrupt
+ * @irq interrupt identifier
+ * @name name of the irq in platform data
+ * @isr interrupt service routine
+ * @registered are we currently registered to receive interrupts from this source.
+ */
+struct accessory_irq_descriptor {
+ enum accessory_irq irq;
+ const char *name;
+ irq_handler_t isr;
+ int registered;
+};
+
+/**
+ * Encapsulates info of single regulator.
+ * @id regulator identifier
+ * @name name of the regulator
+ * @enabled flag indicating whether regu is currently enabled.
+ * @handle regulator handle
+ */
+struct accessory_regu_descriptor {
+ enum accessory_regulator id;
+ const char *name;
+ int enabled;
+ struct regulator *handle;
+};
+
+/**
+ * Defines attributes for accessory detection operation.
+ * @typename type as string
+ * @type Type of accessory this task tests
+ * @req_det_count How many times this particular type of accessory
+ * needs to be detected in sequence in order to accept. Multidetection
+ * implemented to avoid false detections during plug-in.
+ * @meas_mv Should ACCDETECT2 input voltage be measured just before
+ * making the decision or can cached voltage be used instead.
+ * @minvol minimum voltage (mV) for decision
+ * @maxvol maximum voltage (mV) for decision
+ */
+struct accessory_detect_task {
+ const char *typename;
+ enum accessory_jack_type type;
+ int req_det_count;
+ int meas_mv;
+ int minvol;
+ int maxvol;
+};
+
+/**
+ * Device data, encapsulates all relevant device data structures.
+ *
+ * @pdev: pointer to platform device
+ * @pdata: Platform data
+ * @gpadc: interface for ADC data
+ * @irq_work_queue: Work queue for deferred interrupt processing
+ * @detect_work: work item to perform detection work
+ * @unplug_irq_work: work item to process unplug event
+ * @init_work: work item to process initialization work.
+ * @btn_input_dev: button input device used to report btn presses
+ * @btn_state: Current state of accessory button
+ * @jack_type: type of currently connected accessory
+ * @reported_jack_type: previously reported jack type.
+ * @jack_type_temp: temporary storage for currently connected accessory
+ * @jack_det_count: counts how many times in sequence the accessory
+ * type detection has produced the same result.
+ * @total_jack_det_count: total number of detection rounds made after the
+ * plug-in irq in order to detect the accessory type
+ * @detect_jiffies: Used to save timestamp when detection was made. Timestamp
+ * used to filter out spurious button presses that might occur during the
+ * plug-in procedure.
+ * @accdet1_th_set: flag to indicate whether accdet1 threshold and debounce
+ * times are configured
+ * @accdet2_th_set: flag to indicate whether accdet2 thresholds are configured
+ * @gpio35_dir_set: flag to indicate whether GPIO35 (VIDEOCTRL) direction
+ * has been configured.
+ * @irq_desc_norm: irq's as specified in the initial versions of ab
+ * @irq_desc_inverted: irq's inverted as seen in the latest versions of ab
+ * @no_irqs: Total number of irq's
+ * @regu_desc: Pointer to the regulator descriptors.
+ * @no_of_regu_desc: Total number of descriptors.
+ * @config_accdetect2_hw: Callback for configuring accdet2 comparator.
+ * @config_accdetect1_hw: Callback for configuring accdet1 comparator.
+ * @detect_plugged_in: Callback to detect type of accessory connected.
+ * @meas_voltage_stable: Callback to read present accdet voltage.
+ * @config_hw_test_basic_carkit: Callback to configure hw for carkit
+ * detect.
+ * @turn_off_accdet_comparator: Call back to turn off comparators.
+ * @turn_on_accdet_comparator: Call back to turn ON comparators.
+ * @accdet_abx500_gpadc_get Call back to get an instance of the
+ * GPADC converter.
+ * @config_hw_test_plug_connected: Call back to configure the hw for
+ * accessory detection.
+ * @set_av_switch: Call back to configure the switch for tvout or audioout.
+ * @get_platform_data: call to get platform specific data.
+ */
+struct abx500_ad {
+ struct platform_device *pdev;
+ struct abx500_accdet_platform_data *pdata;
+ void *gpadc;
+ struct workqueue_struct *irq_work_queue;
+
+ struct delayed_work detect_work;
+ struct delayed_work unplug_irq_work;
+ struct delayed_work init_work;
+
+ struct input_dev *btn_input_dev;
+ enum accessory_button_state btn_state;
+
+ enum accessory_jack_type jack_type;
+ enum accessory_jack_type reported_jack_type;
+ enum accessory_jack_type jack_type_temp;
+
+ int jack_det_count;
+ int total_jack_det_count;
+
+ unsigned long detect_jiffies;
+
+ int accdet1_th_set;
+ int accdet2_th_set;
+ int gpio35_dir_set;
+
+ struct accessory_irq_descriptor *irq_desc_norm;
+ struct accessory_irq_descriptor *irq_desc_inverted;
+ int no_irqs;
+
+ struct accessory_regu_descriptor *regu_desc;
+ int no_of_regu_desc;
+
+ void (*config_accdetect2_hw)(struct abx500_ad *, int);
+ void (*config_accdetect1_hw)(struct abx500_ad *, int);
+ int (*detect_plugged_in)(struct abx500_ad *);
+ int (*meas_voltage_stable)(struct abx500_ad *);
+ void (*config_hw_test_basic_carkit)(struct abx500_ad *, int);
+ void (*turn_off_accdet_comparator)(struct platform_device *pdev);
+ void (*turn_on_accdet_comparator)(struct platform_device *pdev);
+ void* (*accdet_abx500_gpadc_get)(void);
+ void (*config_hw_test_plug_connected)(struct abx500_ad *dd, int enable);
+ void (*set_av_switch)(struct abx500_ad *dd,
+ enum accessory_avcontrol_dir dir);
+ struct abx500_accdet_platform_data *
+ (*get_platform_data)(struct platform_device *pdev);
+};
+
+/* Forward declarations */
+extern irqreturn_t unplug_irq_handler(int irq, void *_userdata);
+extern irqreturn_t plug_irq_handler(int irq, void *_userdata);
+extern irqreturn_t button_press_irq_handler(int irq, void *_userdata);
+extern irqreturn_t button_release_irq_handler(int irq, void *_userdata);
+extern void accessory_regulator_enable(struct abx500_ad *dd,
+ enum accessory_regulator reg);
+extern void accessory_regulator_disable(struct abx500_ad *dd,
+ enum accessory_regulator reg);
+extern void report_jack_status(struct abx500_ad *dd);
+
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+extern struct abx500_ad ab5500_accessory_det_callbacks;
+#endif
+
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+extern struct abx500_ad ab8500_accessory_det_callbacks;
+#endif
+
+#endif /* _ABx500_ACCDET_H */
+
diff --git a/arch/arm/mach-ux500/include/mach/context.h b/arch/arm/mach-ux500/include/mach/context.h
new file mode 100644
index 00000000000..22b56351284
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/context.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+
+/* Defines to be used with
+ * context_ape_notifier_register
+ */
+#define CONTEXT_APE_SAVE 0 /* APE save */
+#define CONTEXT_APE_RESTORE 1 /* APE restore */
+
+/* Defines to be used with
+ * context_arm_notifier_register
+ */
+#define CONTEXT_ARM_CORE_SAVE 0 /* Called for each ARM core */
+#define CONTEXT_ARM_CORE_RESTORE 1 /* Called for each ARM core */
+#define CONTEXT_ARM_COMMON_SAVE 2 /* Called when ARM common is saved */
+#define CONTEXT_ARM_COMMON_RESTORE 3 /* Called when ARM common is restored */
+
+int context_ape_notifier_register(struct notifier_block *nb);
+int context_ape_notifier_unregister(struct notifier_block *nb);
+
+int context_arm_notifier_register(struct notifier_block *nb);
+int context_arm_notifier_unregister(struct notifier_block *nb);
+
+void context_vape_save(void);
+void context_vape_restore(void);
+
+void context_gpio_save(void);
+void context_gpio_restore(void);
+void context_gpio_restore_mux(void);
+void context_gpio_mux_safe_switch(bool begin);
+
+void context_gic_dist_disable_unneeded_irqs(void);
+
+void context_varm_save_common(void);
+void context_varm_restore_common(void);
+
+void context_varm_save_core(void);
+void context_varm_restore_core(void);
+
+void context_save_cpu_registers(void);
+void context_restore_cpu_registers(void);
+
+void context_save_to_sram_and_wfi(bool cleanL2cache);
+
+void context_clean_l1_cache_all(void);
+void context_save_arm_registers(u32 **backup_stack);
+void context_restore_arm_registers(u32 **backup_stack);
+
+void context_save_cp15_registers(u32 **backup_stack);
+void context_restore_cp15_registers(u32 **backup_stack);
+
+void context_save_to_sram_and_wfi_internal(u32 backup_sram_storage,
+ bool cleanL2cache);
+
+/* DB specific functions in either context-db8500 or context-db5500 */
+void u8500_context_save_icn(void);
+void u8500_context_restore_icn(void);
+void u8500_context_init(void);
+
+void u5500_context_save_icn(void);
+void u5500_context_restore_icn(void);
+void u5500_context_init(void);
+
+#else
+
+static inline void context_varm_save_core(void) {}
+static inline void context_save_cpu_registers(void) {}
+static inline void context_save_to_sram_and_wfi(bool cleanL2cache) {}
+static inline void context_restore_cpu_registers(void) {}
+static inline void context_varm_restore_core(void) {}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
new file mode 100644
index 00000000000..9d1e1c52c13
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _CRYPTO_UX500_H
+#define _CRYPTO_UX500_H
+
+#include <plat/ste_dma40.h>
+
+struct cryp_platform_data {
+ struct stedma40_chan_cfg mem_to_engine;
+ struct stedma40_chan_cfg engine_to_mem;
+};
+
+struct hash_platform_data {
+ struct stedma40_chan_cfg mem_to_engine;
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/db5500-keypad.h b/arch/arm/mach-ux500/include/mach/db5500-keypad.h
new file mode 100644
index 00000000000..d9d23419ab3
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/db5500-keypad.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License, version 2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __DB5500_KEYPAD_H
+#define __DB5500_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+#define KEYPAD_MAX_ROWS 9
+#define KEYPAD_MAX_COLS 8
+
+/**
+ * struct db5500_keypad_platform_data - structure for platform specific data
+ * @keymap_data: matrix scan code table for keycodes
+ * @debounce_ms: platform specific debounce time
+ * @no_autorepeat: flag for auto repetition
+ * @init : pointer to keypad init function
+ * @exit : pointer to keypad exit function
+ * @krow : maximum number of rows
+ * @kcol : maximum number of cols
+ * @gpio_input_pins: pointer to gpio input pins
+ * @gpio_output_pins: pointer to gpio output pins
+ * @switch_delay : gpio switch_delay
+ */
+struct db5500_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+ u8 debounce_ms;
+ bool no_autorepeat;
+ int (*init)(void);
+ int (*exit)(void);
+ u8 krow;
+ u8 kcol;
+ int *gpio_input_pins;
+ int *gpio_output_pins;
+ int switch_delay;
+};
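+
+/*
+ * Minimal usage sketch (hypothetical board data, not part of this patch):
+ *
+ *	static struct db5500_keypad_platform_data board_keypad_data = {
+ *		.keymap_data	= &board_keymap_data,
+ *		.debounce_ms	= 40,
+ *		.no_autorepeat	= true,
+ *		.krow		= KEYPAD_MAX_ROWS,
+ *		.kcol		= KEYPAD_MAX_COLS,
+ *	};
+ */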
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index 994b5fe6f85..870c3ff574c 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -34,6 +34,7 @@
#define U5500_ICN_BASE 0xA0040000
#define U5500_B2R2_BASE 0xa0200000
#define U5500_BOOT_ROM_BASE 0x90000000
+#define U5500_ASIC_ID_ADDRESS (U5500_BOOT_ROM_BASE + 0x1FFF4)
#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000)
#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000)
@@ -67,6 +68,8 @@
#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000)
#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000)
#define U5500_PRCMU_TCDM_BASE (U5500_PER4_BASE + 0x18000)
+#define U5500_PRCMU_TCPM_BASE (U5500_PER4_BASE + 0x10000)
+#define U5500_TPIU_BASE (U5500_PER4_BASE + 0x50000)
#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000)
#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000)
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index 751b0e6938d..d2956a508d8 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -22,7 +22,9 @@
#define U8500_ESRAM_DMA_LCPA_OFFSET 0x10000
#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK0 + U8500_ESRAM_DMA_LCPA_OFFSET)
-#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000)
+
+/* This address fulfills the 256k alignment requirement of the lcla base */
+#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4
#define U8500_PER3_BASE 0x80000000
#define U8500_STM_BASE 0x80100000
@@ -40,15 +42,14 @@
#define U8500_ASIC_ID_BASE 0x9001D000
#define U8500_PER6_BASE 0xa03c0000
+#define U8500_PER7_BASE 0xa03d0000
#define U8500_PER5_BASE 0xa03e0000
-#define U8500_PER7_BASE_ED 0xa03d0000
#define U8500_SVA_BASE 0xa0100000
#define U8500_SIA_BASE 0xa0200000
#define U8500_SGA_BASE 0xa0300000
#define U8500_MCDE_BASE 0xa0350000
-#define U8500_DMA_BASE_ED 0xa0362000
#define U8500_DMA_BASE 0x801C0000 /* v1 */
#define U8500_SBAG_BASE 0xa0390000
@@ -66,13 +67,6 @@
#define U8500_GPIO2_BASE (U8500_PER2_BASE + 0xE000)
#define U8500_GPIO3_BASE (U8500_PER5_BASE + 0x1E000)
-/* per7 base addresses */
-#define U8500_CR_BASE_ED (U8500_PER7_BASE_ED + 0x8000)
-#define U8500_MTU0_BASE_ED (U8500_PER7_BASE_ED + 0xa000)
-#define U8500_MTU1_BASE_ED (U8500_PER7_BASE_ED + 0xb000)
-#define U8500_TZPC0_BASE_ED (U8500_PER7_BASE_ED + 0xc000)
-#define U8500_CLKRST7_BASE_ED (U8500_PER7_BASE_ED + 0xf000)
-
#define U8500_UART0_BASE (U8500_PER1_BASE + 0x0000)
#define U8500_UART1_BASE (U8500_PER1_BASE + 0x1000)
@@ -104,7 +98,6 @@
#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000)
#define U8500_PRCMU_TIMER_3_BASE (U8500_PER4_BASE + 0x07338)
#define U8500_PRCMU_TIMER_4_BASE (U8500_PER4_BASE + 0x07450)
-#define U8500_PRCMU_TCDM_BASE_V1 (U8500_PER4_BASE + 0x0f000)
#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x68000)
#define U8500_PRCMU_TCPM_BASE (U8500_PER4_BASE + 0x60000)
diff --git a/arch/arm/mach-ux500/include/mach/dcache.h b/arch/arm/mach-ux500/include/mach/dcache.h
new file mode 100644
index 00000000000..83fe618b04f
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/dcache.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Data cache helpers
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _MACH_UX500_DCACHE_H_
+#define _MACH_UX500_DCACHE_H_
+
+#include <linux/types.h>
+
+void drain_cpu_write_buf(void);
+void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *cleaned_everything);
+void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *flushed_everything);
+bool speculative_data_prefetch(void);
+/* Returns 1 if no cache is present */
+u32 get_dcache_granularity(void);
+
+#endif /* _MACH_UX500_DCACHE_H_ */
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index 020b6369a30..27e6b5faaa0 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -13,11 +13,24 @@ struct amba_device;
extern struct platform_device u5500_gpio_devs[];
extern struct platform_device u8500_gpio_devs[];
+extern struct platform_device u8500_mcde_device;
+extern struct platform_device u5500_mcde_device;
+extern struct platform_device u8500_shrm_device;
+extern struct platform_device u8500_b2r2_device;
+extern struct platform_device u5500_b2r2_device;
+extern struct platform_device u8500_trace_modem;
+extern struct platform_device ux500_hwmem_device;
+extern struct platform_device u8500_stm_device;
extern struct amba_device ux500_pl031_device;
-
-extern struct platform_device u8500_dma40_device;
+extern struct platform_device ux500_hash1_device;
+extern struct platform_device ux500_cryp1_device;
+extern struct platform_device mloader_fw_device;
+extern struct platform_device u5500_thsens_device;
+extern struct platform_device u8500_thsens_device;
extern struct platform_device ux500_ske_keypad_device;
-
-void dma40_u8500ed_fixup(void);
+extern struct platform_device u8500_wdt_device;
+extern struct platform_device u8500_hsi_device;
+extern struct platform_device ux500_mmio_device;
+extern struct platform_device u5500_mmio_device;
#endif
diff --git a/arch/arm/mach-ux500/include/mach/gpio.h b/arch/arm/mach-ux500/include/mach/gpio.h
index 7389df911b1..58e2a9a7043 100644
--- a/arch/arm/mach-ux500/include/mach/gpio.h
+++ b/arch/arm/mach-ux500/include/mach/gpio.h
@@ -5,6 +5,19 @@
* 288 (#267 is the highest one actually hooked up) onchip GPIOs, plus enough
* room for a couple of GPIO expanders.
*/
-#define ARCH_NR_GPIOS 350
+#define ARCH_NR_GPIOS 355
+#define NOMADIK_NR_GPIO 288
+
+#include <asm-generic/gpio.h>
+
+/* Invoke gpiolib's gpio_chip abstraction */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
+#define MOP500_EGPIO_END MOP500_EGPIO(24)
+#define AB8500_GPIO_BASE MOP500_EGPIO_END
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 470ac52663d..5983d53c3c9 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -10,25 +10,56 @@
#ifndef __MACH_HARDWARE_H
#define __MACH_HARDWARE_H
-/* macros to get at IO space when running virtually
+/*
+ * Macros to get at IO space when running virtually
* We dont map all the peripherals, let ioremap do
* this for us. We map only very basic peripherals here.
*/
#define U8500_IO_VIRTUAL 0xf0000000
#define U8500_IO_PHYSICAL 0xa0000000
-/* this macro is used in assembly, so no cast */
+/* This macro is used in assembly, so no cast */
#define IO_ADDRESS(x) \
(((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + U8500_IO_VIRTUAL)
/* typesafe io address */
#define __io_address(n) __io(IO_ADDRESS(n))
-/* used by some plat-nomadik code */
+/* Used by some plat-nomadik code */
#define io_p2v(n) __io_address(n)
#include <mach/db8500-regs.h>
#include <mach/db5500-regs.h>
+/*
+ * FIFO offsets for IPs
+ */
+#define MSP_TX_RX_REG_OFFSET 0
+#define SSP_TX_RX_REG_OFFSET 0x8
+#define SPI_TX_RX_REG_OFFSET 0x8
+#define SD_MMC_TX_RX_REG_OFFSET 0x80
+#define CRYP1_RX_REG_OFFSET 0x10
+#define CRYP1_TX_REG_OFFSET 0x8
+#define HASH1_TX_REG_OFFSET 0x4
+
+/* MSP related board-specific declarations */
+
+#define MSP_DATA_DELAY MSP_DELAY_0
+#define MSP_TX_CLOCK_EDGE MSP_FALLING_EDGE
+#define MSP_RX_CLOCK_EDGE MSP_FALLING_EDGE
+
+#define MSP_0_CONTROLLER 1
+#define MSP_1_CONTROLLER 2
+#define MSP_2_CONTROLLER 3
+#define MSP_3_CONTROLLER 4
+
+#define SSP_0_CONTROLLER 4
+#define SSP_1_CONTROLLER 5
+
+#define SPI023_0_CONTROLLER 6
+#define SPI023_1_CONTROLLER 7
+#define SPI023_2_CONTROLLER 8
+#define SPI023_3_CONTROLLER 9
+
#ifndef __ASSEMBLY__
#include <mach/id.h>
@@ -36,6 +67,14 @@ extern void __iomem *_PRCMU_BASE;
#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
+
+#ifdef CONFIG_UX500_SOC_DB5500
+bool cpu_is_u5500v1(void);
+bool cpu_is_u5500v2(void);
+#else
+static inline bool cpu_is_u5500v1(void) { return false; }
+static inline bool cpu_is_u5500v2(void) { return false; }
#endif
+#endif /* __ASSEMBLY__ */
#endif /* __MACH_HARDWARE_H */
diff --git a/arch/arm/mach-ux500/include/mach/hcl_defs.h b/arch/arm/mach-ux500/include/mach/hcl_defs.h
new file mode 100644
index 00000000000..efd37608cb3
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/hcl_defs.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _HCL_DEFS_H
+#define _HCL_DEFS_H
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+//#include "platform_os.h"
+
+/*-----------------------------------------------------------------------------
+ * Type definition
+ *---------------------------------------------------------------------------*/
+typedef unsigned char t_uint8;
+typedef signed char t_sint8;
+typedef unsigned short t_uint16;
+typedef signed short t_sint16;
+typedef unsigned long t_uint32;
+typedef signed long t_sint32;
+
+typedef unsigned int t_bitfield;
+
+
+
+#if !defined(FALSE) && !defined(TRUE)
+typedef int t_bool;
+#define FALSE 0
+#define TRUE 1
+#endif
+
+/*
+ * Definition of the different kind of addresses manipulated into a system with MMU
+ * (handle physical AND logical addresses)
+ */
+
+
+typedef t_uint32 t_physical_address;
+typedef t_uint32 t_logical_address;
+
+
+
+/*
+ * Global frequency enumeration
+ * Added to avoid a frequency conversion function, which would otherwise be
+ * required to convert one HCL frequency enumeration value to another.
+ */
+
+/*typedef enum {
+ HCL_FREQ_NOT_SUPPORTED=-1,
+ HCL_FREQ_8KHZ ,
+ HCL_FREQ_11_25KHZ,
+ HCL_FREQ_12KHZ,
+ HCL_FREQ_16KHZ,
+ HCL_FREQ_22_05KHZ,
+ HCL_FREQ_22_5KHZ,
+ HCL_FREQ_24KHZ,
+ HCL_FREQ_32KHZ,
+ HCL_FREQ_44KHZ,
+ HCL_FREQ_44_1KHZ,
+ HCL_FREQ_48KHZ,
+ HCL_FREQ_64KHZ,
+ HCL_FREQ_88KHZ,
+ HCL_FREQ_88_2KHZ,
+ HCL_FREQ_96KHZ,
+ HCL_FREQ_128KHZ,
+ HCL_FREQ_176_4KHZ,
+ HCL_FREQ_192KHZ,
+
+ HCL_FREQ_1MHZ,
+ HCL_FREQ_2MHZ,
+ HCL_FREQ_3MHZ,
+ HCL_FREQ_4MHZ,
+ HCL_FREQ_5MHZ,
+ HCL_FREQ_6MHZ,
+ HCL_FREQ_8MHZ,
+ HCL_FREQ_11MHZ,
+ HCL_FREQ_12MHZ,
+ HCL_FREQ_16MHZ,
+ HCL_FREQ_22MHZ,
+ HCL_FREQ_24MHZ,
+ HCL_FREQ_48MHZ
+} t_frequency;
+
+*/
+
+typedef struct {
+ t_physical_address physical;
+ t_logical_address logical;
+} t_system_address;
+
+
+/*
+ * Define a type used to manipulate size of various buffers
+ */
+typedef t_uint32 t_size;
+
+typedef struct {
+ t_bitfield minor:8;
+ t_bitfield major:8;
+ t_bitfield version:16;
+} t_version;
+
+
+
+
+/*-----------------------------------------------------------------------------
+ * Keyword definition
+ *---------------------------------------------------------------------------*/
+#define PUBLIC /* Extern by default */
+#define PRIVATE static
+
+#ifndef NULL
+#define NULL (0)
+#endif /* ndef NULL */
+
+
+/*-----------------------------------------------------------------------------
+ * Bit setting or clearing
+ *---------------------------------------------------------------------------*/
+#define HCL_SET_BITS(reg,mask) ((reg) |= (mask))
+#define HCL_CLEAR_BITS(reg,mask) ((reg) &= ~(mask))
+#define HCL_READ_BITS(reg,mask) ((reg) & (mask))
+#define HCL_WRITE_BITS(reg,val,mask) ((reg) = (((reg) & ~(mask)) | ((val) & (mask))))
+#define HCL_READ_REG(reg) (reg)
+#define HCL_WRITE_REG(reg,val) ((reg) = (val))
+
+/*-----------------------------------------------------------------------------
+ * field offset extraction from a structure
+ *---------------------------------------------------------------------------*/
+#define HCL_BITFIELD_OFFSET(typeName, fieldName) (t_uint32)(&(((typeName *)0)->fieldName))
+
+/*-----------------------------------------------------------------------------
+ * Bit mask definition
+ *---------------------------------------------------------------------------*/
+#define MASK_NULL8 0x00
+#define MASK_NULL16 0x0000
+#define MASK_NULL32 0x00000000
+#define MASK_ALL8 0xFF
+#define MASK_ALL16 0xFFFF
+#define MASK_ALL32 0xFFFFFFFF
+
+#define MASK_BIT0 (1UL<<0)
+#define MASK_BIT1 (1UL<<1)
+#define MASK_BIT2 (1UL<<2)
+#define MASK_BIT3 (1UL<<3)
+#define MASK_BIT4 (1UL<<4)
+#define MASK_BIT5 (1UL<<5)
+#define MASK_BIT6 (1UL<<6)
+#define MASK_BIT7 (1UL<<7)
+#define MASK_BIT8 (1UL<<8)
+#define MASK_BIT9 (1UL<<9)
+#define MASK_BIT10 (1UL<<10)
+#define MASK_BIT11 (1UL<<11)
+#define MASK_BIT12 (1UL<<12)
+#define MASK_BIT13 (1UL<<13)
+#define MASK_BIT14 (1UL<<14)
+#define MASK_BIT15 (1UL<<15)
+#define MASK_BIT16 (1UL<<16)
+#define MASK_BIT17 (1UL<<17)
+#define MASK_BIT18 (1UL<<18)
+#define MASK_BIT19 (1UL<<19)
+#define MASK_BIT20 (1UL<<20)
+#define MASK_BIT21 (1UL<<21)
+#define MASK_BIT22 (1UL<<22)
+#define MASK_BIT23 (1UL<<23)
+#define MASK_BIT24 (1UL<<24)
+#define MASK_BIT25 (1UL<<25)
+#define MASK_BIT26 (1UL<<26)
+#define MASK_BIT27 (1UL<<27)
+#define MASK_BIT28 (1UL<<28)
+#define MASK_BIT29 (1UL<<29)
+#define MASK_BIT30 (1UL<<30)
+#define MASK_BIT31 (1UL<<31)
+
+/*-----------------------------------------------------------------------------
+ * Quartet shift definition
+ *---------------------------------------------------------------------------*/
+#define MASK_QUARTET (0xFUL)
+#define SHIFT_QUARTET0 0
+#define SHIFT_QUARTET1 4
+#define SHIFT_QUARTET2 8
+#define SHIFT_QUARTET3 12
+#define SHIFT_QUARTET4 16
+#define SHIFT_QUARTET5 20
+#define SHIFT_QUARTET6 24
+#define SHIFT_QUARTET7 28
+#define MASK_QUARTET0 (MASK_QUARTET << SHIFT_QUARTET0)
+#define MASK_QUARTET1 (MASK_QUARTET << SHIFT_QUARTET1)
+#define MASK_QUARTET2 (MASK_QUARTET << SHIFT_QUARTET2)
+#define MASK_QUARTET3 (MASK_QUARTET << SHIFT_QUARTET3)
+#define MASK_QUARTET4 (MASK_QUARTET << SHIFT_QUARTET4)
+#define MASK_QUARTET5 (MASK_QUARTET << SHIFT_QUARTET5)
+#define MASK_QUARTET6 (MASK_QUARTET << SHIFT_QUARTET6)
+#define MASK_QUARTET7 (MASK_QUARTET << SHIFT_QUARTET7)
+
+/*-----------------------------------------------------------------------------
+ * Byte shift definition
+ *---------------------------------------------------------------------------*/
+#define MASK_BYTE (0xFFUL)
+#define SHIFT_BYTE0 0
+#define SHIFT_BYTE1 8
+#define SHIFT_BYTE2 16
+#define SHIFT_BYTE3 24
+#define MASK_BYTE0 (MASK_BYTE << SHIFT_BYTE0)
+#define MASK_BYTE1 (MASK_BYTE << SHIFT_BYTE1)
+#define MASK_BYTE2 (MASK_BYTE << SHIFT_BYTE2)
+#define MASK_BYTE3 (MASK_BYTE << SHIFT_BYTE3)
+
+/*-----------------------------------------------------------------------------
+ * Halfword shift definition
+ *---------------------------------------------------------------------------*/
+#define MASK_HALFWORD (0xFFFFUL)
+#define SHIFT_HALFWORD0 0
+#define SHIFT_HALFWORD1 16
+#define MASK_HALFWORD0 (MASK_HALFWORD << SHIFT_HALFWORD0)
+#define MASK_HALFWORD1 (MASK_HALFWORD << SHIFT_HALFWORD1)
+
+/*-----------------------------------------------------------------------------
+ * Global constants definition
+ *---------------------------------------------------------------------------*/
+#define ONE_KB (1024)
+#define ONE_MB (ONE_KB * ONE_KB)
+
+
+/*-----------------------------------------------------------------------------
+ * Address translation macros declaration
+ *---------------------------------------------------------------------------*/
+
+#define ARM_TO_AHB_ADDR(addr) (addr)
+#define AHB_TO_ARM_ADDR(addr) (addr)
+
+/* For input parameters - would not be changed by the API */
+#define IN
+/* For output parameters - would be changed by the API */
+#define OUT
+/* For input-output parameters - provides input to the API but would be changed by the API */
+#define INOUT
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _HCL_DEFS_H */
+
+/* End of file hcl_defs.h */
+
+
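For reference, a minimal usage sketch of the bit helpers defined in hcl_defs.h above. The register variable and the written values are illustrative only; t_uint32 is assumed to come from the same header.

static void hcl_defs_example(void)
{
	t_uint32 ctrl = MASK_NULL32;

	HCL_SET_BITS(ctrl, MASK_BIT0 | MASK_BIT3);                  /* ctrl == 0x09 */
	HCL_WRITE_BITS(ctrl, 0x5 << SHIFT_QUARTET1, MASK_QUARTET1); /* ctrl == 0x59 */
	HCL_CLEAR_BITS(ctrl, MASK_BIT0);                            /* ctrl == 0x58 */

	if (HCL_READ_BITS(ctrl, MASK_BYTE0) != 0x58)
		HCL_WRITE_REG(ctrl, MASK_NULL32);                   /* reset the register */
}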
diff --git a/arch/arm/mach-ux500/include/mach/id.h b/arch/arm/mach-ux500/include/mach/id.h
index 02b541a37ee..6f669eebc04 100644
--- a/arch/arm/mach-ux500/include/mach/id.h
+++ b/arch/arm/mach-ux500/include/mach/id.h
@@ -38,38 +38,36 @@ static inline unsigned int __attribute_const__ dbx500_revision(void)
static inline bool __attribute_const__ cpu_is_u8500(void)
{
- return dbx500_partnumber() == 0x8500;
+#ifdef CONFIG_UX500_SOC_DB8500
+	/* part number 8520 is also treated as an 8500 */
+ return ((dbx500_partnumber() >> 8) & 0xff) == 0x85;
+#else
+ return false;
+#endif
+}
+
+static inline bool __attribute_const__ cpu_is_u8520(void)
+{
+#ifdef CONFIG_UX500_SOC_DB8500
+ return dbx500_partnumber() == 0x8520;
+#else
+ return false;
+#endif
}
static inline bool __attribute_const__ cpu_is_u5500(void)
{
+#ifdef CONFIG_UX500_SOC_DB5500
return dbx500_partnumber() == 0x5500;
+#else
+ return false;
+#endif
}
/*
* 8500 revisions
*/
-static inline bool __attribute_const__ cpu_is_u8500ed(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0x00;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v1(void)
-{
- return cpu_is_u8500() && (dbx500_revision() & 0xf0) == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v10(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v11(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0xA1;
-}
-
static inline bool __attribute_const__ cpu_is_u8500v2(void)
{
return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0);
@@ -85,9 +83,14 @@ static inline bool cpu_is_u8500v21(void)
return cpu_is_u8500() && (dbx500_revision() == 0xB1);
}
+static inline bool cpu_is_u8500v22(void)
+{
+ return cpu_is_u8500() && (dbx500_revision() == 0xB2);
+}
+
static inline bool cpu_is_u8500v20_or_later(void)
{
- return cpu_is_u8500() && !cpu_is_u8500v10() && !cpu_is_u8500v11();
+ return cpu_is_u8500() && ((dbx500_revision() & 0xf0) >= 0xB0);
}
static inline bool ux500_is_svp(void)
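A short, hypothetical sketch of how board code might use the reworked SoC checks above; the helper name and the messages are illustrative, not part of the patch.

static void __init board_report_soc(void)
{
	if (cpu_is_u8520())
		pr_info("DB8520 detected (also reported as 8500 family)\n");
	else if (cpu_is_u8500v20_or_later())
		pr_info("DB8500 v2.0+ detected, revision 0x%02x\n",
			dbx500_revision());
	else if (cpu_is_u5500())
		pr_info("DB5500 detected\n");
}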
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 47969909836..a58aa51fccf 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -43,6 +43,8 @@
#define MOP500_AB8500_VIR_GPIO_IRQ_BASE \
MOP500_STMPE1601_IRQ_END
+#define MOP500_AB8500_VIR_GPIO_IRQ(x) \
+ (MOP500_AB8500_VIR_GPIO_IRQ_BASE + (x))
#define MOP500_AB8500_VIR_GPIO_IRQ_END \
(MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
@@ -57,7 +59,7 @@
*/
#if MOP500_IRQ_END > IRQ_BOARD_END
#undef IRQ_BOARD_END
-#define IRQ_BOARD_END MOP500_IRQ_END
+#define IRQ_BOARD_END MOP500_IRQ_END
#endif
#endif
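A one-line usage sketch of the new virtual GPIO IRQ helper; the index 3 and the variable name are illustrative.

	int irq = MOP500_AB8500_VIR_GPIO_IRQ(3);	/* MOP500_AB8500_VIR_GPIO_IRQ_BASE + 3 */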
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
index 29d972c7717..2294a47b3a2 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
@@ -7,13 +7,20 @@
#ifndef __MACH_IRQS_BOARD_U5500_H
#define __MACH_IRQS_BOARD_U5500_H
-#define AB5500_NR_IRQS 5
+#include <linux/mfd/abx500/ab5500.h>
+
+#define AB5500_NR_IRQS (AB5500_NUM_IRQ_REGS * 8)
#define IRQ_AB5500_BASE IRQ_BOARD_START
#define IRQ_AB5500_END (IRQ_AB5500_BASE + AB5500_NR_IRQS)
#define U5500_IRQ_END IRQ_AB5500_END
-#if IRQ_BOARD_END < U5500_IRQ_END
+/*
+ * We may have several boards, but only one will run at a
+ * time, so the one with the most IRQs will bump this ahead,
+ * while IRQ_BOARD_START remains the same for either board.
+ */
+#if U5500_IRQ_END > IRQ_BOARD_END
#undef IRQ_BOARD_END
#define IRQ_BOARD_END U5500_IRQ_END
#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db5500.h b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
index 77239776a6f..4ea577aefa0 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
@@ -85,6 +85,36 @@
#ifdef CONFIG_UX500_SOC_DB5500
+/* Virtual interrupts corresponding to the PRCMU wakeups. */
+#define IRQ_DB5500_PRCMU_BASE IRQ_SOC_START
+
+#define IRQ_DB5500_PRCMU_RTC (IRQ_DB5500_PRCMU_BASE)
+#define IRQ_DB5500_PRCMU_RTT0 (IRQ_DB5500_PRCMU_BASE + 1)
+#define IRQ_DB5500_PRCMU_RTT1 (IRQ_DB5500_PRCMU_BASE + 2)
+#define IRQ_DB5500_PRCMU_CD_IRQ (IRQ_DB5500_PRCMU_BASE + 3)
+#define IRQ_DB5500_PRCMU_SRP_TIM (IRQ_DB5500_PRCMU_BASE + 4)
+#define IRQ_DB5500_PRCMU_APE_REQ (IRQ_DB5500_PRCMU_BASE + 5)
+#define IRQ_DB5500_PRCMU_USB (IRQ_DB5500_PRCMU_BASE + 6)
+#define IRQ_DB5500_PRCMU_ABB (IRQ_DB5500_PRCMU_BASE + 7)
+#define IRQ_DB5500_PRCMU_ARM (IRQ_DB5500_PRCMU_BASE + 8)
+#define IRQ_DB5500_PRCMU_MODEM_SW_RESET_REQ (IRQ_DB5500_PRCMU_BASE + 9)
+#define IRQ_DB5500_PRCMU_AC_WAKE_ACK (IRQ_DB5500_PRCMU_BASE + 10)
+#define IRQ_DB5500_PRCMU_GPIO0 (IRQ_DB5500_PRCMU_BASE + 11)
+#define IRQ_DB5500_PRCMU_GPIO1 (IRQ_DB5500_PRCMU_BASE + 12)
+#define IRQ_DB5500_PRCMU_GPIO2 (IRQ_DB5500_PRCMU_BASE + 13)
+#define IRQ_DB5500_PRCMU_GPIO3 (IRQ_DB5500_PRCMU_BASE + 14)
+#define IRQ_DB5500_PRCMU_GPIO4 (IRQ_DB5500_PRCMU_BASE + 15)
+#define IRQ_DB5500_PRCMU_GPIO5 (IRQ_DB5500_PRCMU_BASE + 16)
+#define IRQ_DB5500_PRCMU_GPIO6 (IRQ_DB5500_PRCMU_BASE + 17)
+#define IRQ_DB5500_PRCMU_GPIO7 (IRQ_DB5500_PRCMU_BASE + 18)
+#define IRQ_DB5500_PRCMU_AC_REL_ACK (IRQ_DB5500_PRCMU_BASE + 19)
+#define IRQ_DB5500_PRCMU_LOW_POWER_AUDIO (IRQ_DB5500_PRCMU_BASE + 20)
+#define IRQ_DB5500_PRCMU_TEMP_SENSOR_LOW (IRQ_DB5500_PRCMU_BASE + 21)
+#define IRQ_DB5500_PRCMU_TEMP_SENSOR_HIGH (IRQ_DB5500_PRCMU_BASE + 22)
+#define IRQ_DB5500_PRCMU_END (IRQ_DB5500_PRCMU_BASE + 23)
+
+#define NUM_DB5500_PRCMU_WAKEUPS (IRQ_DB5500_PRCMU_END - IRQ_DB5500_PRCMU_BASE)
+
/*
* After the GPIO ones we reserve a range of IRQ:s in which virtual
* IRQ:s representing modem IRQ:s can be allocated
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 9db68d264c5..a2876464d43 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -11,9 +11,7 @@
#define ASM_ARCH_IRQS_H
#include <mach/hardware.h>
-
-#define IRQ_LOCALTIMER 29
-#define IRQ_LOCALWDOG 30
+#include <linux/gpio.h>
/* Shared Peripheral Interrupt (SHPI) */
#define IRQ_SHPI_START 32
@@ -22,27 +20,34 @@
* MTU0 preserved for now until plat-nomadik is taught not to use it. Don't
* add any other IRQs here, use the irqs-dbx500.h files.
*/
-#define IRQ_MTU0 (IRQ_SHPI_START + 4)
+#define IRQ_MTU0 (IRQ_SHPI_START + 4)
+
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+
+/*********************************************************************/
#define DBX500_NR_INTERNAL_IRQS 160
/* After chip-specific IRQ numbers we have the GPIO ones */
-#define NOMADIK_NR_GPIO 288
#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS)
#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS)
+
+#define GPIO_TO_IRQ NOMADIK_GPIO_TO_IRQ
+#define IRQ_TO_GPIO NOMADIK_IRQ_TO_GPIO
#define IRQ_GPIO_END NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
-#define IRQ_SOC_START IRQ_GPIO_END
+#define IRQ_SOC_START IRQ_GPIO_END
/* This will be overridden by SoC-specific irq headers */
-#define IRQ_SOC_END IRQ_SOC_START
+#define IRQ_SOC_END IRQ_SOC_START
+
+#define IRQ_BOARD_START IRQ_SOC_END
+/* This will be overridden by board-specific irq headers */
+#define IRQ_BOARD_END IRQ_BOARD_START
#include <mach/irqs-db5500.h>
#include <mach/irqs-db8500.h>
-#define IRQ_BOARD_START IRQ_SOC_END
-/* This will be overridden by board-specific irq headers */
-#define IRQ_BOARD_END IRQ_BOARD_START
-
#ifdef CONFIG_MACH_U8500
#include <mach/irqs-board-mop500.h>
#endif
@@ -51,6 +56,8 @@
#include <mach/irqs-board-u5500.h>
#endif
+#ifndef NR_IRQS
#define NR_IRQS IRQ_BOARD_END
+#endif
#endif /* ASM_ARCH_IRQS_H */
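To illustrate how the rearranged IRQ map composes, a small sketch using the GPIO_TO_IRQ/IRQ_TO_GPIO aliases added above (the GPIO number 42 and the function name are arbitrary):

static void __maybe_unused irq_map_example(void)
{
	int irq  = GPIO_TO_IRQ(42);	/* DBX500_NR_INTERNAL_IRQS + 42 */
	int gpio = IRQ_TO_GPIO(irq);	/* back to 42 */

	/* the board IRQ range starts where the (overridden) SoC range ends */
	(void)gpio;
}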
diff --git a/arch/arm/mach-ux500/include/mach/pm-timer.h b/arch/arm/mach-ux500/include/mach/pm-timer.h
new file mode 100644
index 00000000000..ec9e919e70d
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/pm-timer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+
+#ifndef PM_TIMER_H
+#define PM_TIMER_H
+
+#include <linux/ktime.h>
+
+#ifdef CONFIG_U8500_CPUIDLE_DEBUG
+ktime_t u8500_rtc_exit_latency_get(void);
+void ux500_rtcrtt_measure_latency(bool enable);
+#else
+static inline ktime_t u8500_rtc_exit_latency_get(void)
+{
+ return ktime_set(0, 0);
+}
+static inline void ux500_rtcrtt_measure_latency(bool enable) { }
+
+#endif
+
+void ux500_rtcrtt_off(void);
+void ux500_rtcrtt_next(u32 time_us);
+
+#endif
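A sketch of how a cpuidle path might use the RTC-RTT helpers declared above; the 10 ms wake-up value is purely illustrative.

static void __maybe_unused deep_idle_example(void)
{
	/* program a wake-up 10 ms ahead before entering a deep sleep state */
	ux500_rtcrtt_next(10000);

	/* ... enter the low-power state here ... */

	/* cancel the wake-up; with CONFIG_U8500_CPUIDLE_DEBUG set the measured
	 * exit latency can be read back, otherwise the stub returns 0 */
	ux500_rtcrtt_off();
	pr_debug("rtc exit latency: %lld ns\n",
		 (long long)ktime_to_ns(u8500_rtc_exit_latency_get()));
}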
diff --git a/arch/arm/mach-ux500/include/mach/pm.h b/arch/arm/mach-ux500/include/mach/pm.h
new file mode 100644
index 00000000000..b9fe3bbc8d7
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/pm.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef PM_COMMON_H
+#define PM_COMMON_H
+
+#ifdef CONFIG_PM
+enum prcmu_idle_stat {
+ SLEEP_OK = 0xf3,
+ DEEP_SLEEP_OK = 0xf6,
+ IDLE_OK = 0xf0,
+ DEEPIDLE_OK = 0xe3,
+ PRCMU2ARMPENDINGIT_ER = 0x91,
+ ARMPENDINGIT_ER = 0x93,
+};
+
+/**
+ * ux500_pm_gic_decouple()
+ *
+ * Decouple GIC from the interrupt bus.
+ */
+void ux500_pm_gic_decouple(void);
+
+/**
+ * ux500_pm_gic_recouple()
+ *
+ * Recouple GIC with the interrupt bus.
+ */
+void ux500_pm_gic_recouple(void);
+
+/**
+ * ux500_pm_gic_pending_interrupt()
+ *
+ * Returns true if the GIC has pending interrupts.
+ */
+bool ux500_pm_gic_pending_interrupt(void);
+
+/**
+ * ux500_pm_prcmu_pending_interrupt()
+ *
+ * Returns true if the PRCMU has pending interrupts.
+ */
+bool ux500_pm_prcmu_pending_interrupt(void);
+
+/**
+ * ux500_pm_prcmu_set_ioforce()
+ *
+ * @enable: Enable/disable
+ *
+ * Enable/disable the gpio-ring
+ */
+void ux500_pm_prcmu_set_ioforce(bool enable);
+
+/**
+ * ux500_pm_prcmu_copy_gic_settings()
+ *
+ * This function copies all the GIC interrupt settings to the PRCMU.
+ * This is needed for the system to catch interrupts in ApIdle.
+ */
+void ux500_pm_prcmu_copy_gic_settings(void);
+
+/**
+ * ux500_pm_gpio_save_wake_up_status()
+ *
+ * This function is called when the prcmu has woken the ARM
+ * but before ioforce is disabled.
+ */
+void ux500_pm_gpio_save_wake_up_status(void);
+
+/**
+ * ux500_pm_gpio_read_wake_up_status()
+ *
+ * @bank_number: The gpio bank.
+ *
+ * Returns the WKS register settings for the given bank number.
+ * The WKS register is cleared when ioforce is released; therefore
+ * this function is needed.
+ */
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_number);
+
+/**
+ * ux500_pm_other_cpu_wfi()
+ *
+ * Returns true if the other CPU is in WFI.
+ */
+bool ux500_pm_other_cpu_wfi(void);
+
+/**
+ * ux500_pm_prcmu_idle_stat()
+ *
+ * Returns the status of the last PRCMU idle/sleep.
+ */
+enum prcmu_idle_stat ux500_pm_prcmu_idle_stat(void);
+
+struct dev_pm_domain;
+extern struct dev_pm_domain ux500_dev_power_domain;
+extern struct dev_pm_domain ux500_amba_dev_power_domain;
+
+#else
+static inline u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_number)
+{
+ return 0;
+}
+
+/**
+ * ux500_pm_prcmu_set_ioforce()
+ *
+ * @enable: Enable/disable
+ *
+ * Enable/disable the gpio-ring
+ */
+static inline void ux500_pm_prcmu_set_ioforce(bool enable) { }
+
+#endif
+
+#endif
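A sketch (hypothetical helper, not part of the patch) of how idle code could combine the PM helpers documented above.

static bool __maybe_unused ap_idle_prepare(void)
{
	ux500_pm_gic_decouple();

	/* abort if an interrupt slipped in before the GIC was decoupled */
	if (ux500_pm_gic_pending_interrupt()) {
		ux500_pm_gic_recouple();
		return false;
	}

	/* let the PRCMU catch interrupts while the GIC is decoupled */
	ux500_pm_prcmu_copy_gic_settings();
	ux500_pm_prcmu_set_ioforce(true);
	return true;
}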
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-debug.h b/arch/arm/mach-ux500/include/mach/prcmu-debug.h
new file mode 100644
index 00000000000..e468543fdef
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/prcmu-debug.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Martin Persson for ST-Ericsson
+ * Etienne Carriere <etienne.carriere@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#ifndef PRCMU_DEBUG_H
+#define PRCMU_DEBUG_H
+
+#ifdef CONFIG_DBX500_PRCMU_DEBUG
+void prcmu_debug_ape_opp_log(u8 opp);
+void prcmu_debug_ddr_opp_log(u8 opp);
+void prcmu_debug_arm_opp_log(u8 opp);
+#else
+static inline void prcmu_debug_ape_opp_log(u8 opp) {}
+static inline void prcmu_debug_ddr_opp_log(u8 opp) {}
+static inline void prcmu_debug_arm_opp_log(u8 opp) {}
+#endif
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/reboot_reasons.h b/arch/arm/mach-ux500/include/mach/reboot_reasons.h
new file mode 100644
index 00000000000..2c21aab58c4
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/reboot_reasons.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Rickard Evertsson <rickard.evertsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Use this file to customize your reboot / sw reset reasons. Add, remove or
+ * modify reasons in reboot_reasons[].
+ * The reboot reasons will be saved to a secure location in TCDM memory and
+ * can be read at boot by e.g. the bootloader, or at a later stage by
+ * userspace, since the codes are exposed through sysfs.
+ */
+
+#ifndef _REBOOT_REASONS_H
+#define _REBOOT_REASONS_H
+
+/*
+ * These defines contain the codes that will be written down to a secure
+ * location before resetting. These values are exposed through a sysfs
+ * entry under /sys/socinfo, see mach-ux500/cpu-db8500.c
+ */
+#define SW_RESET_NO_ARGUMENT 0xBEEF
+#define SW_RESET_FACTORY_RESET 0x4242
+#define SW_RESET_CRASH 0xDEAD
+#define SW_RESET_NORMAL 0xc001
+#define SW_RESET_CHARGING 0xCAFE
+#define SW_RESET_COLDSTART 0x0
+#define SW_RESET_RECOVERY 0x5502
+
+/*
+ * The array reboot_reasons[] is used when you want to map a reboot command
+ * string to a reason code.
+ */
+struct reboot_reason {
+ const char *reason;
+ u16 code;
+};
+
+extern struct reboot_reason reboot_reasons[];
+
+extern unsigned int reboot_reasons_size;
+
+u16 reboot_reason_code(const char *cmd);
+const char *reboot_reason_string(u16 code);
+
+#endif
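An illustrative layout for reboot_reasons[]; the strings and their pairing with the codes are examples only. The real table lives in the SoC/board code and is consumed by reboot_reason_code()/reboot_reason_string().

struct reboot_reason reboot_reasons[] = {
	{ "crash",    SW_RESET_CRASH },
	{ "factory",  SW_RESET_FACTORY_RESET },
	{ "charging", SW_RESET_CHARGING },
	{ "recovery", SW_RESET_RECOVERY },
};

unsigned int reboot_reasons_size = ARRAY_SIZE(reboot_reasons);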
diff --git a/arch/arm/mach-ux500/include/mach/sensors1p.h b/arch/arm/mach-ux500/include/mach/sensors1p.h
new file mode 100644
index 00000000000..544e1d8bab5
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/sensors1p.h
@@ -0,0 +1,24 @@
+
+/*
+ * Copyright (C) 2009-2010 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Header file for one-pin GPIO sensors.
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+
+#ifndef __ASM_ARCH_SFH7741_H
+#define __ASM_ARCH_SFH7741_H
+
+struct sensor_config {
+ int pin;
+ int startup_time; /* in ms */
+ char regulator[32];
+};
+
+struct sensors1p_config {
+ struct sensor_config hal;
+ struct sensor_config proximity;
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index a7d363fdb4c..c49f55e0be1 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -26,6 +26,7 @@ extern void __init ux500_init_irq(void);
extern void __init u5500_sdi_init(void);
extern void __init db5500_dma_init(void);
+extern void __init db8500_dma_init(void);
/* We re-use nomadik_timer for this platform */
extern void nmdk_timer_init(void);
diff --git a/arch/arm/mach-ux500/ste-dma40-db5500.h b/arch/arm/mach-ux500/include/mach/ste-dma40-db5500.h
index cb2110c3285..0ddd4ab9020 100644
--- a/arch/arm/mach-ux500/ste-dma40-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/ste-dma40-db5500.h
@@ -42,7 +42,9 @@ enum dma_src_dev_type {
DB5500_DMA_DEV26_SDMMC2_RX = 26,
DB5500_DMA_DEV27_SDMMC3_RX = 27,
DB5500_DMA_DEV28_SDMMC4_RX = 28,
- /* 29 - 32 not used */
+ /* 29, 30 not used */
+ DB5500_DMA_DEV31_CRYPTO1_RX = 31, /* v2 */
+ /* 32 not used */
DB5500_DMA_DEV33_SDMMC0_RX = 33,
DB5500_DMA_DEV34_SDMMC1_RX = 34,
DB5500_DMA_DEV35_SDMMC2_RX = 35,
@@ -56,7 +58,7 @@ enum dma_src_dev_type {
DB5500_DMA_DEV43_USB_OTG_IEP_5_13 = 43,
DB5500_DMA_DEV44_USB_OTG_IEP_6_14 = 44,
DB5500_DMA_DEV45_USB_OTG_IEP_7_15 = 45,
- /* 46 not used */
+ DB5500_DMA_DEV46_CRYPTO1_RX = 46, /* v2 */
DB5500_DMA_DEV47_MCDE_RX = 47,
DB5500_DMA_DEV48_CRYPTO1_RX = 48,
/* 49, 50 not used */
@@ -98,7 +100,9 @@ enum dma_dest_dev_type {
DB5500_DMA_DEV26_SDMMC2_TX = 26,
DB5500_DMA_DEV27_SDMMC3_TX = 27,
DB5500_DMA_DEV28_SDMMC4_TX = 28,
- /* 29 - 31 not used */
+ /* 29 not used */
+ DB5500_DMA_DEV30_HASH1_TX = 30, /* v2 */
+ DB5500_DMA_DEV31_CRYPTO1_TX = 31, /* v2 */
DB5500_DMA_DEV32_FSMC_TX = 32,
DB5500_DMA_DEV33_SDMMC0_TX = 33,
DB5500_DMA_DEV34_SDMMC1_TX = 34,
@@ -113,7 +117,7 @@ enum dma_dest_dev_type {
DB5500_DMA_DEV43_USB_OTG_OEP_5_13 = 43,
DB5500_DMA_DEV44_USB_OTG_OEP_6_14 = 44,
DB5500_DMA_DEV45_USB_OTG_OEP_7_15 = 45,
- /* 46 not used */
+ DB5500_DMA_DEV46_CRYPTO1_TX = 46, /* v2 */
DB5500_DMA_DEV47_STM_TX = 47,
DB5500_DMA_DEV48_CRYPTO1_TX = 48,
DB5500_DMA_DEV49_CRYPTO1_TX_HASH1_TX = 49,
diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/include/mach/ste-dma40-db8500.h
index a616419bea7..65799a75199 100644
--- a/arch/arm/mach-ux500/ste-dma40-db8500.h
+++ b/arch/arm/mach-ux500/include/mach/ste-dma40-db8500.h
@@ -1,16 +1,19 @@
/*
- * arch/arm/mach-ux500/ste_dma40_db8500.h
- * DB8500-SoC-specific configuration for DMA40
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * DB8500-SoC-specific configuration for DMA40
*/
#ifndef STE_DMA40_DB8500_H
#define STE_DMA40_DB8500_H
#define DB8500_DMA_NR_DEV 64
+/*
+ * All entries with double names are multiplexed
+ * and can never be used at the same time.
+ */
enum dma_src_dev_type {
DB8500_DMA_DEV0_SPI0_RX = 0,
@@ -20,7 +23,7 @@ enum dma_src_dev_type {
DB8500_DMA_DEV4_I2C1_RX = 4,
DB8500_DMA_DEV5_I2C3_RX = 5,
DB8500_DMA_DEV6_I2C2_RX = 6,
- DB8500_DMA_DEV7_I2C4_RX = 7, /* Only on V1 and later */
+ DB8500_DMA_DEV7_I2C4_RX = 7,
DB8500_DMA_DEV8_SSP0_RX = 8,
DB8500_DMA_DEV9_SSP1_RX = 9,
DB8500_DMA_DEV10_MCDE_RX = 10,
@@ -43,8 +46,6 @@ enum dma_src_dev_type {
DB8500_DMA_DEV27_SRC_SXA3_RX_TX = 27,
DB8500_DMA_DEV28_SD_MM2_RX = 28,
DB8500_DMA_DEV29_SD_MM0_RX = 29,
- DB8500_DMA_DEV30_MSP1_RX = 30,
- /* On DB8500v2, MSP3 RX replaces MSP1 RX */
DB8500_DMA_DEV30_MSP3_RX = 30,
DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX = 31,
DB8500_DMA_DEV32_SD_MM1_RX = 32,
@@ -82,7 +83,7 @@ enum dma_dest_dev_type {
DB8500_DMA_DEV4_I2C1_TX = 4,
DB8500_DMA_DEV5_I2C3_TX = 5,
DB8500_DMA_DEV6_I2C2_TX = 6,
- DB8500_DMA_DEV7_I2C4_TX = 7, /* Only on V1 and later */
+ DB8500_DMA_DEV7_I2C4_TX = 7,
DB8500_DMA_DEV8_SSP0_TX = 8,
DB8500_DMA_DEV9_SSP1_TX = 9,
/* 10 is not used*/
diff --git a/arch/arm/mach-ux500/include/mach/suspend.h b/arch/arm/mach-ux500/include/mach/suspend.h
new file mode 100644
index 00000000000..5a8df72be2e
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/suspend.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __MACH_SUSPEND_H
+#define __MACH_SUSPEND_H
+
+#ifdef CONFIG_UX500_SUSPEND
+void suspend_block_sleep(void);
+void suspend_unblock_sleep(void);
+void suspend_set_pins_force_fn(void (*force)(void), void (*force_mux)(void));
+#else
+static inline void suspend_block_sleep(void) { }
+static inline void suspend_unblock_sleep(void) { }
+static inline void suspend_set_pins_force_fn(void (*force)(void),
+ void (*force_mux)(void)) { }
+#endif
+
+#endif /* __MACH_SUSPEND_H */
diff --git a/arch/arm/mach-ux500/include/mach/system.h b/arch/arm/mach-ux500/include/mach/system.h
index c0cd8006f1a..42bc8c72a79 100644
--- a/arch/arm/mach-ux500/include/mach/system.h
+++ b/arch/arm/mach-ux500/include/mach/system.h
@@ -8,6 +8,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/reboot_reasons.h>
+
static inline void arch_idle(void)
{
/*
@@ -19,7 +22,10 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
- /* yet to be implemented - TODO */
+#ifdef CONFIG_UX500_SOC_DB8500
+ /* Call the PRCMU reset API (w/o reset reason code) */
+ prcmu_system_reset(SW_RESET_NO_ARGUMENT);
+#endif
}
#endif
diff --git a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
new file mode 100644
index 00000000000..6978b7314c5
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
@@ -0,0 +1,48 @@
+/*
+ * Data types and interface for TEE application for starting the modem.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef TEE_TA_START_MODEM_H
+#define TEE_TA_START_MODEM_H
+
+#define COMMAND_ID_START_MODEM 0x00000001
+
+#define UUID_TEE_TA_START_MODEM_LOW 0x8AD94107
+#define UUID_TEE_TA_START_MODEM_MID 0x6E50
+#define UUID_TEE_TA_START_MODEM_HIGH 0x418E
+#define UUID_TEE_TA_START_MODEM_CLOCKSEQ \
+ {0xB1, 0x14, 0x75, 0x7D, 0x60, 0x21, 0xBD, 0x36}
+
+struct mcore_segment_descr {
+ void *segment;
+ void *hash;
+ u32 size;
+};
+
+struct access_image_descr {
+ void *elf_hdr;
+ void *pgm_hdr_tbl;
+ void *signature;
+ unsigned long nbr_segment;
+ struct mcore_segment_descr *descr;
+};
+
+/* TODO: To be redefined with only info needed by Secure world. */
+struct tee_ta_start_modem {
+ void *access_mem_start;
+ u32 shared_mem_size;
+ u32 access_private_mem_size;
+ struct access_image_descr access_image_descr;
+};
+
+/**
+ * This is the function to handle the modem release.
+ */
+int tee_ta_start_modem(struct tee_ta_start_modem *data);
+
+#endif
+
diff --git a/arch/arm/mach-ux500/include/mach/timex.h b/arch/arm/mach-ux500/include/mach/timex.h
index d0942c17401..0ba497bd9d7 100644
--- a/arch/arm/mach-ux500/include/mach/timex.h
+++ b/arch/arm/mach-ux500/include/mach/timex.h
@@ -2,5 +2,6 @@
#define __ASM_ARCH_TIMEX_H
#define CLOCK_TICK_RATE 110000000
+#define ARCH_HAS_READ_CURRENT_TIMER
#endif
diff --git a/arch/arm/mach-ux500/include/mach/usb.h b/arch/arm/mach-ux500/include/mach/usb.h
index d3739d41881..67fbd00e690 100644
--- a/arch/arm/mach-ux500/include/mach/usb.h
+++ b/arch/arm/mach-ux500/include/mach/usb.h
@@ -22,4 +22,14 @@ struct ux500_musb_board_data {
void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg,
int *dma_tx_cfg);
+
+struct abx500_usbgpio_platform_data {
+ int (*get)(struct device *device);
+ void (*enable)(void);
+ void (*disable)(void);
+ void (*put)(void);
+ int usb_cs;
+};
+
+void ux500_restore_context(void);
#endif
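A hypothetical board-file sketch showing how abx500_usbgpio_platform_data might be wired up; every function name and the usb_cs GPIO number are made up for illustration.

static int board_usbgpio_get(struct device *dev)
{
	return 0;			/* e.g. request/mux the USB GPIOs here */
}

static void board_usbgpio_enable(void)  { /* drive the USB chip-select */ }
static void board_usbgpio_disable(void) { /* release the USB chip-select */ }
static void board_usbgpio_put(void)     { /* free the USB GPIOs */ }

static struct abx500_usbgpio_platform_data board_usbgpio_pdata = {
	.get	 = board_usbgpio_get,
	.enable	 = board_usbgpio_enable,
	.disable = board_usbgpio_disable,
	.put	 = board_usbgpio_put,
	.usb_cs	 = 76,			/* illustrative GPIO number */
};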
diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h
index a4945cb4117..e0c6c408552 100644
--- a/arch/arm/mach-ux500/include/mach/vmalloc.h
+++ b/arch/arm/mach-ux500/include/mach/vmalloc.h
@@ -15,4 +15,4 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xf0000000UL
+#define VMALLOC_END 0xf7800000UL
diff --git a/arch/arm/mach-ux500/l2x0-prefetch.c b/arch/arm/mach-ux500/l2x0-prefetch.c
new file mode 100644
index 00000000000..48a4495533f
--- /dev/null
+++ b/arch/arm/mach-ux500/l2x0-prefetch.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/tee.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+#include <asm/hardware/cache-l2x0.h>
+
+static struct tee_session session;
+static struct tee_context context;
+static void __iomem *l2x0_base;
+
+#define L2X0_PREFETCH_CTRL_REG (0x00000F60)
+#define L2X0_PREFETCH_CTRL_BIT_DATA_EN (1 << 28)
+#define L2X0_PREFETCH_CTRL_BIT_INST_EN (1 << 29)
+
+#define L2X0_UUID_TEE_TA_START_LOW 0xBC765EDE
+#define L2X0_UUID_TEE_TA_START_MID 0x6724
+#define L2X0_UUID_TEE_TA_START_HIGH 0x11DF
+#define L2X0_UUID_TEE_TA_START_CLOCKSEQ \
+ {0x8E, 0x12, 0xEC, 0xDB, 0xDF, 0xD7, 0x20, 0x85}
+
+static void prefetch_enable(void)
+{
+ struct tee_operation operation;
+ u32 data;
+ int err;
+ int origin_err;
+
+ data = readl(l2x0_base + L2X0_PREFETCH_CTRL_REG);
+
+	pr_debug("l2x0-prefetch: %s start, prefetch_ctrl=0x%08x\n", __func__,
+ data);
+ if (!(data & L2X0_PREFETCH_CTRL_BIT_INST_EN) ||
+ !(data & L2X0_PREFETCH_CTRL_BIT_DATA_EN)) {
+
+ data |= (L2X0_PREFETCH_CTRL_BIT_INST_EN |
+ L2X0_PREFETCH_CTRL_BIT_DATA_EN);
+
+ operation.shm[0].buffer = &data;
+ operation.shm[0].size = sizeof(data);
+ operation.shm[0].flags = TEEC_MEM_INPUT;
+ operation.flags = TEEC_MEMREF_0_USED;
+
+ err = teec_invoke_command(&session,
+ TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER,
+ &operation, &origin_err);
+ if (err)
+ pr_err("l2x0-prefetch: prefetch enable failed, err=%d",
+ err);
+ }
+ pr_debug("l2x0-prefetch: %s end, prefetch_ctrl=0x%08x\n", __func__,
+ readl(l2x0_base + L2X0_PREFETCH_CTRL_REG));
+}
+
+static void prefetch_disable(void)
+{
+ struct tee_operation operation;
+ u32 data;
+ int err;
+ int origin_err;
+
+ data = readl(l2x0_base + L2X0_PREFETCH_CTRL_REG);
+
+	pr_debug("l2x0-prefetch: %s start, prefetch_ctrl=0x%08x\n", __func__,
+ data);
+ if (data & (L2X0_PREFETCH_CTRL_BIT_INST_EN |
+ L2X0_PREFETCH_CTRL_BIT_DATA_EN)) {
+
+ data &= ~(L2X0_PREFETCH_CTRL_BIT_INST_EN |
+ L2X0_PREFETCH_CTRL_BIT_DATA_EN);
+
+ operation.shm[0].buffer = &data;
+ operation.shm[0].size = sizeof(data);
+ operation.shm[0].flags = TEEC_MEM_INPUT;
+ operation.flags = TEEC_MEMREF_0_USED;
+
+ err = teec_invoke_command(&session,
+ TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER,
+ &operation, &origin_err);
+ if (err)
+ pr_err("l2x0-prefetch: prefetch disable failed, err=%d",
+ err);
+ }
+ pr_debug("l2x0-prefetch: %s end, prefetch_ctrl=0x%08x\n", __func__,
+ readl(l2x0_base + L2X0_PREFETCH_CTRL_REG));
+}
+
+static int __init prefetch_ctrl_init(void)
+{
+ int err;
+ int origin_err;
+	/* Selects the TrustZone application needed for the job. */
+ struct tee_uuid static_uuid = {
+ L2X0_UUID_TEE_TA_START_LOW,
+ L2X0_UUID_TEE_TA_START_MID,
+ L2X0_UUID_TEE_TA_START_HIGH,
+ L2X0_UUID_TEE_TA_START_CLOCKSEQ,
+ };
+
+	/* Get the PL310 base address. It will be used as read-only. */
+ if (cpu_is_u5500())
+ l2x0_base = __io_address(U5500_L2CC_BASE);
+ else if (cpu_is_u8500())
+ l2x0_base = __io_address(U8500_L2CC_BASE);
+ else
+ ux500_unknown_soc();
+
+ err = teec_initialize_context(NULL, &context);
+ if (err) {
+ pr_err("l2x0-prefetch: unable to initialize tee context,"
+ " err = %d\n", err);
+ err = -EINVAL;
+ goto error0;
+ }
+
+ err = teec_open_session(&context, &session, &static_uuid,
+ TEEC_LOGIN_PUBLIC, NULL, NULL, &origin_err);
+ if (err) {
+ pr_err("l2x0-prefetch: unable to open tee session,"
+ " tee error = %d, origin error = %d\n",
+ err, origin_err);
+ err = -EINVAL;
+ goto error1;
+ }
+
+ outer_cache.prefetch_enable = prefetch_enable;
+ outer_cache.prefetch_disable = prefetch_disable;
+
+ pr_info("l2x0-prefetch: initialized.\n");
+
+ return 0;
+
+error1:
+ (void)teec_finalize_context(&context);
+error0:
+ return err;
+}
+
+static void __exit prefetch_ctrl_exit(void)
+{
+ outer_cache.prefetch_enable = NULL;
+ outer_cache.prefetch_disable = NULL;
+
+ (void)teec_close_session(&session);
+ (void)teec_finalize_context(&context);
+}
+
+/* Wait for TEE driver to be initialized. */
+late_initcall(prefetch_ctrl_init);
+module_exit(prefetch_ctrl_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PL310 prefetch control");
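The driver above only registers the hooks; a sketch of how power-management code might consume them through the prefetch_enable/prefetch_disable members the driver assigns:

	if (outer_cache.prefetch_disable)
		outer_cache.prefetch_disable();

	/* ... run the low-power sequence ... */

	if (outer_cache.prefetch_enable)
		outer_cache.prefetch_enable();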
diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
deleted file mode 100644
index 2b2d51caf9d..00000000000
--- a/arch/arm/mach-ux500/mbox-db5500.c
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
- * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
- * License terms: GNU General Public License (GPL), version 2.
- */
-
-/*
- * Mailbox nomenclature:
- *
- * APE MODEM
- * mbox pairX
- * ..........................
- * . .
- * . peer .
- * . send ---- .
- * . --> | | .
- * . | | .
- * . ---- .
- * . .
- * . local .
- * . rec ---- .
- * . | | <-- .
- * . | | .
- * . ---- .
- * .........................
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/completion.h>
-#include <mach/mbox-db5500.h>
-
-#define MBOX_NAME "mbox"
-
-#define MBOX_FIFO_DATA 0x000
-#define MBOX_FIFO_ADD 0x004
-#define MBOX_FIFO_REMOVE 0x008
-#define MBOX_FIFO_THRES_FREE 0x00C
-#define MBOX_FIFO_THRES_OCCUP 0x010
-#define MBOX_FIFO_STATUS 0x014
-
-#define MBOX_DISABLE_IRQ 0x4
-#define MBOX_ENABLE_IRQ 0x0
-#define MBOX_LATCH 1
-
-/* Global list of all mailboxes */
-static struct list_head mboxs = LIST_HEAD_INIT(mboxs);
-
-static struct mbox *get_mbox_with_id(u8 id)
-{
- u8 i;
- struct list_head *pos = &mboxs;
- for (i = 0; i <= id; i++)
- pos = pos->next;
-
- return (struct mbox *) list_entry(pos, struct mbox, list);
-}
-
-int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
-{
- int res = 0;
-
- spin_lock(&mbox->lock);
-
- dev_dbg(&(mbox->pdev->dev),
- "About to buffer 0x%X to mailbox 0x%X."
- " ri = %d, wi = %d\n",
- mbox_msg, (u32)mbox, mbox->read_index,
- mbox->write_index);
-
- /* Check if write buffer is full */
- while (((mbox->write_index + 1) % MBOX_BUF_SIZE) == mbox->read_index) {
- if (!block) {
- dev_dbg(&(mbox->pdev->dev),
- "Buffer full in non-blocking call! "
- "Returning -ENOMEM!\n");
- res = -ENOMEM;
- goto exit;
- }
- spin_unlock(&mbox->lock);
- dev_dbg(&(mbox->pdev->dev),
- "Buffer full in blocking call! Sleeping...\n");
- mbox->client_blocked = 1;
- wait_for_completion(&mbox->buffer_available);
- dev_dbg(&(mbox->pdev->dev),
- "Blocking send was woken up! Trying again...\n");
- spin_lock(&mbox->lock);
- }
-
- mbox->buffer[mbox->write_index] = mbox_msg;
- mbox->write_index = (mbox->write_index + 1) % MBOX_BUF_SIZE;
-
- /*
- * Indicate that we want an IRQ as soon as there is a slot
- * in the FIFO
- */
- writel(MBOX_ENABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
-
-exit:
- spin_unlock(&mbox->lock);
- return res;
-}
-EXPORT_SYMBOL(mbox_send);
-
-#if defined(CONFIG_DEBUG_FS)
-/*
- * Expected input: <value> <nbr sends>
- * Example: "echo 0xdeadbeef 4 > mbox-node" sends 0xdeadbeef 4 times
- */
-static ssize_t mbox_write_fifo(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- unsigned long mbox_mess;
- unsigned long nbr_sends;
- unsigned long i;
- char int_buf[16];
- char *token;
- char *val;
-
- struct mbox *mbox = (struct mbox *) dev->platform_data;
-
- strncpy((char *) &int_buf, buf, sizeof(int_buf));
- token = (char *) &int_buf;
-
- /* Parse message */
- val = strsep(&token, " ");
- if ((val == NULL) || (strict_strtoul(val, 16, &mbox_mess) != 0))
- mbox_mess = 0xDEADBEEF;
-
- val = strsep(&token, " ");
- if ((val == NULL) || (strict_strtoul(val, 10, &nbr_sends) != 0))
- nbr_sends = 1;
-
- dev_dbg(dev, "Will write 0x%lX %ld times using data struct at 0x%X\n",
- mbox_mess, nbr_sends, (u32) mbox);
-
- for (i = 0; i < nbr_sends; i++)
- mbox_send(mbox, mbox_mess, true);
-
- return count;
-}
-
-static ssize_t mbox_read_fifo(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int mbox_value;
- struct mbox *mbox = (struct mbox *) dev->platform_data;
-
- if ((readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7) <= 0)
- return sprintf(buf, "Mailbox is empty\n");
-
- mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
- writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
-
- return sprintf(buf, "0x%X\n", mbox_value);
-}
-
-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
-
-static int mbox_show(struct seq_file *s, void *data)
-{
- struct list_head *pos;
- u8 mbox_index = 0;
-
- list_for_each(pos, &mboxs) {
- struct mbox *m =
- (struct mbox *) list_entry(pos, struct mbox, list);
- if (m == NULL) {
- seq_printf(s,
- "Unable to retrieve mailbox %d\n",
- mbox_index);
- continue;
- }
-
- spin_lock(&m->lock);
- if ((m->virtbase_peer == NULL) || (m->virtbase_local == NULL)) {
- seq_printf(s, "MAILBOX %d not setup or corrupt\n",
- mbox_index);
- spin_unlock(&m->lock);
- continue;
- }
-
- seq_printf(s,
- "===========================\n"
- " MAILBOX %d\n"
- " PEER MAILBOX DUMP\n"
- "---------------------------\n"
- "FIFO: 0x%X (%d)\n"
- "Free Threshold: 0x%.2X (%d)\n"
- "Occupied Threshold: 0x%.2X (%d)\n"
- "Status: 0x%.2X (%d)\n"
- " Free spaces (ot): %d (%d)\n"
- " Occup spaces (ot): %d (%d)\n"
- "===========================\n"
- " LOCAL MAILBOX DUMP\n"
- "---------------------------\n"
- "FIFO: 0x%.X (%d)\n"
- "Free Threshold: 0x%.2X (%d)\n"
- "Occupied Threshold: 0x%.2X (%d)\n"
- "Status: 0x%.2X (%d)\n"
- " Free spaces (ot): %d (%d)\n"
- " Occup spaces (ot): %d (%d)\n"
- "===========================\n"
- "write_index: %d\n"
- "read_index : %d\n"
- "===========================\n"
- "\n",
- mbox_index,
- readl(m->virtbase_peer + MBOX_FIFO_DATA),
- readl(m->virtbase_peer + MBOX_FIFO_DATA),
- readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
- readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
- readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
- readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
- readl(m->virtbase_peer + MBOX_FIFO_STATUS),
- readl(m->virtbase_peer + MBOX_FIFO_STATUS),
- (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 4) & 0x7,
- (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 7) & 0x1,
- (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 0) & 0x7,
- (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 3) & 0x1,
- readl(m->virtbase_local + MBOX_FIFO_DATA),
- readl(m->virtbase_local + MBOX_FIFO_DATA),
- readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
- readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
- readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
- readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
- readl(m->virtbase_local + MBOX_FIFO_STATUS),
- readl(m->virtbase_local + MBOX_FIFO_STATUS),
- (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 4) & 0x7,
- (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 7) & 0x1,
- (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 0) & 0x7,
- (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 3) & 0x1,
- m->write_index, m->read_index);
- mbox_index++;
- spin_unlock(&m->lock);
- }
-
- return 0;
-}
-
-static int mbox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mbox_show, NULL);
-}
-
-static const struct file_operations mbox_operations = {
- .owner = THIS_MODULE,
- .open = mbox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
-static irqreturn_t mbox_irq(int irq, void *arg)
-{
- u32 mbox_value;
- int nbr_occup;
- int nbr_free;
- struct mbox *mbox = (struct mbox *) arg;
-
- spin_lock(&mbox->lock);
-
- dev_dbg(&(mbox->pdev->dev),
- "mbox IRQ [%d] received. ri = %d, wi = %d\n",
- irq, mbox->read_index, mbox->write_index);
-
- /*
- * Check if we have any outgoing messages, and if there is space for
- * them in the FIFO.
- */
- if (mbox->read_index != mbox->write_index) {
- /*
- * Check by reading FREE for LOCAL since that indicates
- * OCCUP for PEER
- */
- nbr_free = (readl(mbox->virtbase_local + MBOX_FIFO_STATUS)
- >> 4) & 0x7;
- dev_dbg(&(mbox->pdev->dev),
- "Status indicates %d empty spaces in the FIFO!\n",
- nbr_free);
-
- while ((nbr_free > 0) &&
- (mbox->read_index != mbox->write_index)) {
- /* Write the message and latch it into the FIFO */
- writel(mbox->buffer[mbox->read_index],
- (mbox->virtbase_peer + MBOX_FIFO_DATA));
- writel(MBOX_LATCH,
- (mbox->virtbase_peer + MBOX_FIFO_ADD));
- dev_dbg(&(mbox->pdev->dev),
- "Wrote message 0x%X to addr 0x%X\n",
- mbox->buffer[mbox->read_index],
- (u32) (mbox->virtbase_peer + MBOX_FIFO_DATA));
-
- nbr_free--;
- mbox->read_index =
- (mbox->read_index + 1) % MBOX_BUF_SIZE;
- }
-
- /*
- * Check if we still want IRQ:s when there is free
- * space to send
- */
- if (mbox->read_index != mbox->write_index) {
- dev_dbg(&(mbox->pdev->dev),
- "Still have messages to send, but FIFO full. "
- "Request IRQ again!\n");
- writel(MBOX_ENABLE_IRQ,
- mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
- } else {
- dev_dbg(&(mbox->pdev->dev),
- "No more messages to send. "
- "Do not request IRQ again!\n");
- writel(MBOX_DISABLE_IRQ,
- mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
- }
-
- /*
- * Check if we can signal any blocked clients that it is OK to
- * start buffering again
- */
- if (mbox->client_blocked &&
- (((mbox->write_index + 1) % MBOX_BUF_SIZE)
- != mbox->read_index)) {
- dev_dbg(&(mbox->pdev->dev),
- "Waking up blocked client\n");
- complete(&mbox->buffer_available);
- mbox->client_blocked = 0;
- }
- }
-
- /* Check if we have any incoming messages */
- nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
- if (nbr_occup == 0)
- goto exit;
-
- if (mbox->cb == NULL) {
- dev_dbg(&(mbox->pdev->dev), "No receive callback registered, "
- "leaving %d incoming messages in fifo!\n", nbr_occup);
- goto exit;
- }
-
- /* Read and acknowledge the message */
- mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
- writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
-
- /* Notify consumer of new mailbox message */
- dev_dbg(&(mbox->pdev->dev), "Calling callback for message 0x%X!\n",
- mbox_value);
- mbox->cb(mbox_value, mbox->client_data);
-
-exit:
- dev_dbg(&(mbox->pdev->dev), "Exit mbox IRQ. ri = %d, wi = %d\n",
- mbox->read_index, mbox->write_index);
- spin_unlock(&mbox->lock);
-
- return IRQ_HANDLED;
-}
-
-/* Setup is executed once for each mbox pair */
-struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
-{
- struct resource *resource;
- int irq;
- int res;
- struct mbox *mbox;
-
- mbox = get_mbox_with_id(mbox_id);
- if (mbox == NULL) {
- dev_err(&(mbox->pdev->dev), "Incorrect mailbox id: %d!\n",
- mbox_id);
- goto exit;
- }
-
- /*
- * Check if mailbox has been allocated to someone else,
- * otherwise allocate it
- */
- if (mbox->allocated) {
- dev_err(&(mbox->pdev->dev), "Mailbox number %d is busy!\n",
- mbox_id);
- mbox = NULL;
- goto exit;
- }
- mbox->allocated = true;
-
- dev_dbg(&(mbox->pdev->dev), "Initiating mailbox number %d: 0x%X...\n",
- mbox_id, (u32)mbox);
-
- mbox->client_data = priv;
- mbox->cb = mbox_cb;
-
- /* Get addr for peer mailbox and ioremap it */
- resource = platform_get_resource_byname(mbox->pdev,
- IORESOURCE_MEM,
- "mbox_peer");
- if (resource == NULL) {
- dev_err(&(mbox->pdev->dev),
- "Unable to retrieve mbox peer resource\n");
- mbox = NULL;
- goto exit;
- }
- dev_dbg(&(mbox->pdev->dev),
- "Resource name: %s start: 0x%X, end: 0x%X\n",
- resource->name, resource->start, resource->end);
- mbox->virtbase_peer = ioremap(resource->start, resource_size(resource));
- if (!mbox->virtbase_peer) {
- dev_err(&(mbox->pdev->dev), "Unable to ioremap peer mbox\n");
- mbox = NULL;
- goto exit;
- }
- dev_dbg(&(mbox->pdev->dev),
- "ioremapped peer physical: (0x%X-0x%X) to virtual: 0x%X\n",
- resource->start, resource->end, (u32) mbox->virtbase_peer);
-
- /* Get addr for local mailbox and ioremap it */
- resource = platform_get_resource_byname(mbox->pdev,
- IORESOURCE_MEM,
- "mbox_local");
- if (resource == NULL) {
- dev_err(&(mbox->pdev->dev),
- "Unable to retrieve mbox local resource\n");
- mbox = NULL;
- goto exit;
- }
- dev_dbg(&(mbox->pdev->dev),
- "Resource name: %s start: 0x%X, end: 0x%X\n",
- resource->name, resource->start, resource->end);
- mbox->virtbase_local = ioremap(resource->start, resource_size(resource));
- if (!mbox->virtbase_local) {
- dev_err(&(mbox->pdev->dev), "Unable to ioremap local mbox\n");
- mbox = NULL;
- goto exit;
- }
- dev_dbg(&(mbox->pdev->dev),
- "ioremapped local physical: (0x%X-0x%X) to virtual: 0x%X\n",
- resource->start, resource->end, (u32) mbox->virtbase_peer);
-
- init_completion(&mbox->buffer_available);
- mbox->client_blocked = 0;
-
- /* Get IRQ for mailbox and allocate it */
- irq = platform_get_irq_byname(mbox->pdev, "mbox_irq");
- if (irq < 0) {
- dev_err(&(mbox->pdev->dev),
- "Unable to retrieve mbox irq resource\n");
- mbox = NULL;
- goto exit;
- }
-
- dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", irq);
- res = request_irq(irq, mbox_irq, 0, mbox->name, (void *) mbox);
- if (res < 0) {
- dev_err(&(mbox->pdev->dev),
- "Unable to allocate mbox irq %d\n", irq);
- mbox = NULL;
- goto exit;
- }
-
- /* Set up mailbox to not launch IRQ on free space in mailbox */
- writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
-
- /*
- * Set up mailbox to launch IRQ on new message if we have
- * a callback set. If not, do not raise IRQ, but keep message
- * in FIFO for manual retrieval
- */
- if (mbox_cb != NULL)
- writel(MBOX_ENABLE_IRQ,
- mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
- else
- writel(MBOX_DISABLE_IRQ,
- mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
-
-#if defined(CONFIG_DEBUG_FS)
- res = device_create_file(&(mbox->pdev->dev), &dev_attr_fifo);
- if (res != 0)
- dev_warn(&(mbox->pdev->dev),
- "Unable to create mbox sysfs entry");
-
- (void) debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL,
- NULL, &mbox_operations);
-#endif
-
- dev_info(&(mbox->pdev->dev),
- "Mailbox driver with index %d initiated!\n", mbox_id);
-
-exit:
- return mbox;
-}
-EXPORT_SYMBOL(mbox_setup);
-
-
-int __init mbox_probe(struct platform_device *pdev)
-{
- struct mbox local_mbox;
- struct mbox *mbox;
- int res = 0;
- dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32) pdev);
-
- memset(&local_mbox, 0x0, sizeof(struct mbox));
-
- /* Associate our mbox data with the platform device */
- res = platform_device_add_data(pdev,
- (void *) &local_mbox,
- sizeof(struct mbox));
- if (res != 0) {
- dev_err(&(pdev->dev),
- "Unable to allocate driver platform data!\n");
- goto exit;
- }
-
- mbox = (struct mbox *) pdev->dev.platform_data;
- mbox->pdev = pdev;
- mbox->write_index = 0;
- mbox->read_index = 0;
-
- INIT_LIST_HEAD(&(mbox->list));
- list_add_tail(&(mbox->list), &mboxs);
-
- sprintf(mbox->name, "%s", MBOX_NAME);
- spin_lock_init(&mbox->lock);
-
- dev_info(&(pdev->dev), "Mailbox driver loaded\n");
-
-exit:
- return res;
-}
-
-static struct platform_driver mbox_driver = {
- .driver = {
- .name = MBOX_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init mbox_init(void)
-{
- return platform_driver_probe(&mbox_driver, mbox_probe);
-}
-
-module_init(mbox_init);
-
-void __exit mbox_exit(void)
-{
- platform_driver_unregister(&mbox_driver);
-}
-
-module_exit(mbox_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MBOX driver");
diff --git a/arch/arm/mach-ux500/pins-db8500.h b/arch/arm/mach-ux500/pins-db8500.h
index 8b1d1a7a679..062c7acf457 100644
--- a/arch/arm/mach-ux500/pins-db8500.h
+++ b/arch/arm/mach-ux500/pins-db8500.h
@@ -35,40 +35,40 @@
#define GPIO4_GPIO PIN_CFG(4, GPIO)
#define GPIO4_U1_RXD PIN_CFG(4, ALT_A)
-#define GPIO4_I2C4_SCL PIN_CFG_INPUT(4, ALT_B, PULLUP)
+#define GPIO4_I2C4_SCL PIN_CFG(4, ALT_B)
#define GPIO4_IP_TRSTn PIN_CFG(4, ALT_C)
#define GPIO5_GPIO PIN_CFG(5, GPIO)
#define GPIO5_U1_TXD PIN_CFG(5, ALT_A)
-#define GPIO5_I2C4_SDA PIN_CFG_INPUT(5, ALT_B, PULLUP)
+#define GPIO5_I2C4_SDA PIN_CFG(5, ALT_B)
#define GPIO5_IP_GPIO6 PIN_CFG(5, ALT_C)
#define GPIO6_GPIO PIN_CFG(6, GPIO)
#define GPIO6_U1_CTSn PIN_CFG(6, ALT_A)
-#define GPIO6_I2C1_SCL PIN_CFG_INPUT(6, ALT_B, PULLUP)
+#define GPIO6_I2C1_SCL PIN_CFG(6, ALT_B)
#define GPIO6_IP_GPIO0 PIN_CFG(6, ALT_C)
#define GPIO7_GPIO PIN_CFG(7, GPIO)
#define GPIO7_U1_RTSn PIN_CFG(7, ALT_A)
-#define GPIO7_I2C1_SDA PIN_CFG_INPUT(7, ALT_B, PULLUP)
+#define GPIO7_I2C1_SDA PIN_CFG(7, ALT_B)
#define GPIO7_IP_GPIO1 PIN_CFG(7, ALT_C)
#define GPIO8_GPIO PIN_CFG(8, GPIO)
-#define GPIO8_IPI2C_SDA PIN_CFG_INPUT(8, ALT_A, PULLUP)
-#define GPIO8_I2C2_SDA PIN_CFG_INPUT(8, ALT_B, PULLUP)
+#define GPIO8_IPI2C_SDA PIN_CFG(8, ALT_A)
+#define GPIO8_I2C2_SDA PIN_CFG(8, ALT_B)
#define GPIO9_GPIO PIN_CFG(9, GPIO)
-#define GPIO9_IPI2C_SCL PIN_CFG_INPUT(9, ALT_A, PULLUP)
-#define GPIO9_I2C2_SCL PIN_CFG_INPUT(9, ALT_B, PULLUP)
+#define GPIO9_IPI2C_SCL PIN_CFG(9, ALT_A)
+#define GPIO9_I2C2_SCL PIN_CFG(9, ALT_B)
#define GPIO10_GPIO PIN_CFG(10, GPIO)
-#define GPIO10_IPI2C_SDA PIN_CFG_INPUT(10, ALT_A, PULLUP)
-#define GPIO10_I2C2_SDA PIN_CFG_INPUT(10, ALT_B, PULLUP)
+#define GPIO10_IPI2C_SDA PIN_CFG(10, ALT_A)
+#define GPIO10_I2C2_SDA PIN_CFG(10, ALT_B)
#define GPIO10_IP_GPIO3 PIN_CFG(10, ALT_C)
#define GPIO11_GPIO PIN_CFG(11, GPIO)
-#define GPIO11_IPI2C_SCL PIN_CFG_INPUT(11, ALT_A, PULLUP)
-#define GPIO11_I2C2_SCL PIN_CFG_INPUT(11, ALT_B, PULLUP)
+#define GPIO11_IPI2C_SCL PIN_CFG(11, ALT_A)
+#define GPIO11_I2C2_SCL PIN_CFG(11, ALT_B)
#define GPIO11_IP_GPIO2 PIN_CFG(11, ALT_C)
#define GPIO12_GPIO PIN_CFG(12, GPIO)
@@ -87,12 +87,12 @@
#define GPIO16_GPIO PIN_CFG(16, GPIO)
#define GPIO16_MSP0_RFS PIN_CFG(16, ALT_A)
-#define GPIO16_I2C1_SCL PIN_CFG_INPUT(16, ALT_B, PULLUP)
+#define GPIO16_I2C1_SCL PIN_CFG(16, ALT_B)
#define GPIO16_SLIM0_DAT PIN_CFG(16, ALT_C)
#define GPIO17_GPIO PIN_CFG(17, GPIO)
#define GPIO17_MSP0_RCK PIN_CFG(17, ALT_A)
-#define GPIO17_I2C1_SDA PIN_CFG_INPUT(17, ALT_B, PULLUP)
+#define GPIO17_I2C1_SDA PIN_CFG(17, ALT_B)
#define GPIO17_SLIM0_CLK PIN_CFG(17, ALT_C)
#define GPIO18_GPIO PIN_CFG(18, GPIO)
@@ -434,10 +434,10 @@
#define GPIO146_SSP0_TXD PIN_CFG(146, ALT_A)
#define GPIO147_GPIO PIN_CFG(147, GPIO)
-#define GPIO147_I2C0_SCL PIN_CFG_INPUT(147, ALT_A, PULLUP)
+#define GPIO147_I2C0_SCL PIN_CFG(147, ALT_A)
#define GPIO148_GPIO PIN_CFG(148, GPIO)
-#define GPIO148_I2C0_SDA PIN_CFG_INPUT(148, ALT_A, PULLUP)
+#define GPIO148_I2C0_SDA PIN_CFG(148, ALT_A)
#define GPIO149_GPIO PIN_CFG(149, GPIO)
#define GPIO149_IP_GPIO0 PIN_CFG(149, ALT_A)
@@ -459,82 +459,82 @@
#define GPIO152_KP_O9 PIN_CFG(152, ALT_C)
#define GPIO153_GPIO PIN_CFG(153, GPIO)
-#define GPIO153_KP_I7 PIN_CFG_INPUT(153, ALT_A, PULLDOWN)
+#define GPIO153_KP_I7 PIN_CFG(153, ALT_A)
#define GPIO153_LCD_D24 PIN_CFG(153, ALT_B)
#define GPIO153_U2_RXD PIN_CFG(153, ALT_C)
#define GPIO154_GPIO PIN_CFG(154, GPIO)
-#define GPIO154_KP_I6 PIN_CFG_INPUT(154, ALT_A, PULLDOWN)
+#define GPIO154_KP_I6 PIN_CFG(154, ALT_A)
#define GPIO154_LCD_D25 PIN_CFG(154, ALT_B)
#define GPIO154_U2_TXD PIN_CFG(154, ALT_C)
#define GPIO155_GPIO PIN_CFG(155, GPIO)
-#define GPIO155_KP_I5 PIN_CFG_INPUT(155, ALT_A, PULLDOWN)
+#define GPIO155_KP_I5 PIN_CFG(155, ALT_A)
#define GPIO155_LCD_D26 PIN_CFG(155, ALT_B)
#define GPIO155_STMAPE_CLK PIN_CFG(155, ALT_C)
#define GPIO156_GPIO PIN_CFG(156, GPIO)
-#define GPIO156_KP_I4 PIN_CFG_INPUT(156, ALT_A, PULLDOWN)
+#define GPIO156_KP_I4 PIN_CFG(156, ALT_A)
#define GPIO156_LCD_D27 PIN_CFG(156, ALT_B)
#define GPIO156_STMAPE_DAT3 PIN_CFG(156, ALT_C)
#define GPIO157_GPIO PIN_CFG(157, GPIO)
-#define GPIO157_KP_O7 PIN_CFG_INPUT(157, ALT_A, PULLUP)
+#define GPIO157_KP_O7 PIN_CFG(157, ALT_A)
#define GPIO157_LCD_D28 PIN_CFG(157, ALT_B)
#define GPIO157_STMAPE_DAT2 PIN_CFG(157, ALT_C)
#define GPIO158_GPIO PIN_CFG(158, GPIO)
-#define GPIO158_KP_O6 PIN_CFG_INPUT(158, ALT_A, PULLUP)
+#define GPIO158_KP_O6 PIN_CFG(158, ALT_A)
#define GPIO158_LCD_D29 PIN_CFG(158, ALT_B)
#define GPIO158_STMAPE_DAT1 PIN_CFG(158, ALT_C)
#define GPIO159_GPIO PIN_CFG(159, GPIO)
-#define GPIO159_KP_O5 PIN_CFG_INPUT(159, ALT_A, PULLUP)
+#define GPIO159_KP_O5 PIN_CFG(159, ALT_A)
#define GPIO159_LCD_D30 PIN_CFG(159, ALT_B)
#define GPIO159_STMAPE_DAT0 PIN_CFG(159, ALT_C)
#define GPIO160_GPIO PIN_CFG(160, GPIO)
-#define GPIO160_KP_O4 PIN_CFG_INPUT(160, ALT_A, PULLUP)
+#define GPIO160_KP_O4 PIN_CFG(160, ALT_A)
#define GPIO160_LCD_D31 PIN_CFG(160, ALT_B)
#define GPIO160_NONE PIN_CFG(160, ALT_C)
#define GPIO161_GPIO PIN_CFG(161, GPIO)
-#define GPIO161_KP_I3 PIN_CFG_INPUT(161, ALT_A, PULLDOWN)
+#define GPIO161_KP_I3 PIN_CFG(161, ALT_A)
#define GPIO161_LCD_D32 PIN_CFG(161, ALT_B)
#define GPIO161_UARTMOD_RXD PIN_CFG(161, ALT_C)
#define GPIO162_GPIO PIN_CFG(162, GPIO)
-#define GPIO162_KP_I2 PIN_CFG_INPUT(162, ALT_A, PULLDOWN)
+#define GPIO162_KP_I2 PIN_CFG(162, ALT_A)
#define GPIO162_LCD_D33 PIN_CFG(162, ALT_B)
#define GPIO162_UARTMOD_TXD PIN_CFG(162, ALT_C)
#define GPIO163_GPIO PIN_CFG(163, GPIO)
-#define GPIO163_KP_I1 PIN_CFG_INPUT(163, ALT_A, PULLDOWN)
+#define GPIO163_KP_I1 PIN_CFG(163, ALT_A)
#define GPIO163_LCD_D34 PIN_CFG(163, ALT_B)
#define GPIO163_STMMOD_CLK PIN_CFG(163, ALT_C)
#define GPIO164_GPIO PIN_CFG(164, GPIO)
-#define GPIO164_KP_I0 PIN_CFG_INPUT(164, ALT_A, PULLUP)
+#define GPIO164_KP_I0 PIN_CFG(164, ALT_A)
#define GPIO164_LCD_D35 PIN_CFG(164, ALT_B)
#define GPIO164_STMMOD_DAT3 PIN_CFG(164, ALT_C)
#define GPIO165_GPIO PIN_CFG(165, GPIO)
-#define GPIO165_KP_O3 PIN_CFG_INPUT(165, ALT_A, PULLUP)
+#define GPIO165_KP_O3 PIN_CFG(165, ALT_A)
#define GPIO165_LCD_D36 PIN_CFG(165, ALT_B)
#define GPIO165_STMMOD_DAT2 PIN_CFG(165, ALT_C)
#define GPIO166_GPIO PIN_CFG(166, GPIO)
-#define GPIO166_KP_O2 PIN_CFG_INPUT(166, ALT_A, PULLUP)
+#define GPIO166_KP_O2 PIN_CFG(166, ALT_A)
#define GPIO166_LCD_D37 PIN_CFG(166, ALT_B)
#define GPIO166_STMMOD_DAT1 PIN_CFG(166, ALT_C)
#define GPIO167_GPIO PIN_CFG(167, GPIO)
-#define GPIO167_KP_O1 PIN_CFG_INPUT(167, ALT_A, PULLUP)
+#define GPIO167_KP_O1 PIN_CFG(167, ALT_A)
#define GPIO167_LCD_D38 PIN_CFG(167, ALT_B)
#define GPIO167_STMMOD_DAT0 PIN_CFG(167, ALT_C)
#define GPIO168_GPIO PIN_CFG(168, GPIO)
-#define GPIO168_KP_O0 PIN_CFG_INPUT(168, ALT_A, PULLUP)
+#define GPIO168_KP_O0 PIN_CFG(168, ALT_A)
#define GPIO168_LCD_D39 PIN_CFG(168, ALT_B)
#define GPIO168_NONE PIN_CFG(168, ALT_C)
@@ -637,7 +637,7 @@
#define GPIO216_GPIO PIN_CFG(216, GPIO)
#define GPIO216_MC1_DAT2DIR PIN_CFG(216, ALT_A)
#define GPIO216_MC3_CMDDIR PIN_CFG(216, ALT_B)
-#define GPIO216_I2C3_SDA PIN_CFG_INPUT(216, ALT_C, PULLUP)
+#define GPIO216_I2C3_SDA PIN_CFG(216, ALT_C)
#define GPIO216_SPI2_FRM PIN_CFG(216, ALT_C)
#define GPIO217_GPIO PIN_CFG(217, GPIO)
@@ -649,7 +649,7 @@
#define GPIO218_GPIO PIN_CFG(218, GPIO)
#define GPIO218_MC1_DAT31DIR PIN_CFG(218, ALT_A)
#define GPIO218_MC3_DAT0DIR PIN_CFG(218, ALT_B)
-#define GPIO218_I2C3_SCL PIN_CFG_INPUT(218, ALT_C, PULLUP)
+#define GPIO218_I2C3_SCL PIN_CFG(218, ALT_C)
#define GPIO218_SPI2_RXD PIN_CFG(218, ALT_C)
#define GPIO219_GPIO PIN_CFG(219, GPIO)
@@ -698,12 +698,12 @@
#define GPIO229_GPIO PIN_CFG(229, GPIO)
#define GPIO229_CLKOUT1 PIN_CFG(229, ALT_A)
#define GPIO229_PWL PIN_CFG(229, ALT_B)
-#define GPIO229_I2C3_SDA PIN_CFG_INPUT(229, ALT_C, PULLUP)
+#define GPIO229_I2C3_SDA PIN_CFG(229, ALT_C)
#define GPIO230_GPIO PIN_CFG(230, GPIO)
#define GPIO230_CLKOUT2 PIN_CFG(230, ALT_A)
#define GPIO230_PWL PIN_CFG(230, ALT_B)
-#define GPIO230_I2C3_SCL PIN_CFG_INPUT(230, ALT_C, PULLUP)
+#define GPIO230_I2C3_SCL PIN_CFG(230, ALT_C)
#define GPIO256_GPIO PIN_CFG(256, GPIO)
#define GPIO256_USB_NXT PIN_CFG(256, ALT_A)
diff --git a/arch/arm/mach-ux500/pins.c b/arch/arm/mach-ux500/pins.c
new file mode 100644
index 00000000000..125512ea12b
--- /dev/null
+++ b/arch/arm/mach-ux500/pins.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <plat/pincfg.h>
+
+#include "pins.h"
+
+static LIST_HEAD(pin_lookups);
+static DEFINE_MUTEX(pin_lookups_mutex);
+static DEFINE_SPINLOCK(pins_lock);
+
+void __init ux500_pins_add(struct ux500_pin_lookup *pl, size_t num)
+{
+ mutex_lock(&pin_lookups_mutex);
+
+ while (num--) {
+ list_add_tail(&pl->node, &pin_lookups);
+ pl++;
+ }
+
+ mutex_unlock(&pin_lookups_mutex);
+}
+
+struct ux500_pins *ux500_pins_get(const char *name)
+{
+ struct ux500_pins *pins = NULL;
+ struct ux500_pin_lookup *pl;
+
+ mutex_lock(&pin_lookups_mutex);
+
+ list_for_each_entry(pl, &pin_lookups, node) {
+ if (!strcmp(pl->name, name)) {
+ pins = pl->pins;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&pin_lookups_mutex);
+ return pins;
+}
+
+int ux500_pins_enable(struct ux500_pins *pins)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&pins_lock, flags);
+
+ if (pins->usage++ == 0)
+ ret = nmk_config_pins(pins->cfg, pins->num);
+
+ spin_unlock_irqrestore(&pins_lock, flags);
+ return ret;
+}
+
+int ux500_pins_disable(struct ux500_pins *pins)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&pins_lock, flags);
+
+ if (WARN_ON(pins->usage == 0))
+ goto out;
+
+ if (--pins->usage == 0)
+ ret = nmk_config_pins_sleep(pins->cfg, pins->num);
+
+out:
+ spin_unlock_irqrestore(&pins_lock, flags);
+ return ret;
+}
+
+void ux500_pins_put(struct ux500_pins *pins)
+{
+ WARN_ON(!pins);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/gpio.h>
+
+static void show_pin(struct seq_file *s, pin_cfg_t pin)
+{
+ static const char *afnames[] = {
+ [NMK_GPIO_ALT_GPIO] = "GPIO",
+ [NMK_GPIO_ALT_A] = "A",
+ [NMK_GPIO_ALT_B] = "B",
+ [NMK_GPIO_ALT_C] = "C"
+ };
+ static const char *pullnames[] = {
+ [NMK_GPIO_PULL_NONE] = "none",
+ [NMK_GPIO_PULL_UP] = "up",
+ [NMK_GPIO_PULL_DOWN] = "down",
+ [3] /* illegal */ = "??"
+ };
+
+ int pin_num = PIN_NUM(pin);
+ int pull = PIN_PULL(pin);
+ int af = PIN_ALT(pin);
+ int slpm = PIN_SLPM(pin);
+ int output = PIN_DIR(pin);
+ int val = PIN_VAL(pin);
+ int slpm_pull = PIN_SLPM_PULL(pin);
+ int slpm_dir = PIN_SLPM_DIR(pin);
+ int slpm_val = PIN_SLPM_VAL(pin);
+ int slpm_pdis = PIN_SLPM_PDIS(pin);
+
+ seq_printf(s,
+ " pin %d [%#lx]: af %s, pull %s (%s%s) - slpm: %s%s%s%s%s\n",
+ pin_num, pin, afnames[af],
+ pullnames[pull],
+ output ? "output " : "input",
+ output ? (val ? "high" : "low") : "",
+ slpm ? "no-change/no-wakeup " : "input/wakeup ",
+ slpm_dir ? (slpm_dir == 1 ? "input " : "output " ) : "",
+ slpm_dir == 1 ? (slpm_pull == 0 ? "pull: none ":
+ (slpm_pull == NMK_GPIO_PULL_UP ?
+ "pull: up " : "pull: down ") ): "",
+ slpm_dir == 2 ? (slpm_val == 1 ? "low " : "high " ) : "",
+ slpm_pdis ? (slpm_pdis == 1 ? "pdis: dis" : "pdis: en") :
+ "pdis: no change");
+}
+
+static int pins_dbg_show(struct seq_file *s, void *iter)
+{
+ struct ux500_pin_lookup *pl;
+ int i;
+ bool *pins;
+ int prev = -2;
+ int first = 0;
+
+ pins = kzalloc(sizeof(bool) * NOMADIK_NR_GPIO, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ mutex_lock(&pin_lookups_mutex);
+
+ list_for_each_entry(pl, &pin_lookups, node) {
+ seq_printf(s, "\n%s (%d) usage: %d\n",
+ pl->name, pl->pins->num, pl->pins->usage);
+ for (i = 0; i < pl->pins->num; i++) {
+ show_pin(s, pl->pins->cfg[i]);
+ pins[PIN_NUM(pl->pins->cfg[i])] = true;
+ }
+ }
+ mutex_unlock(&pin_lookups_mutex);
+
+ seq_printf(s, "\nSummary allocated pins:\n");
+ for (i = 0; i < NOMADIK_NR_GPIO; i++) {
+ if (prev == i - 1) {
+ if (pins[i])
+ prev = i;
+ else
+ if (prev > 0) {
+ if (first != prev)
+ seq_printf(s, "-%d, ", prev);
+ else
+ seq_printf(s, ", ");
+ }
+ continue;
+ }
+ if (pins[i]) {
+ seq_printf(s, "%d", i);
+ prev = i;
+ first = i;
+ }
+ }
+ if (prev == i - 1 && first != prev)
+ seq_printf(s, "-%d", prev);
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int pins_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pins_dbg_show, inode->i_private);
+}
+
+static const struct file_operations pins_fops = {
+ .open = pins_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init pins_dbg_init(void)
+{
+ (void) debugfs_create_file("pins", S_IRUGO,
+ NULL,
+ NULL,
+ &pins_fops);
+ return 0;
+}
+late_initcall(pins_dbg_init);
+#endif
diff --git a/arch/arm/mach-ux500/pins.h b/arch/arm/mach-ux500/pins.h
new file mode 100644
index 00000000000..0fa65cc6b96
--- /dev/null
+++ b/arch/arm/mach-ux500/pins.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef __MACH_UX500_PINS_H
+#define __MACH_UX500_PINS_H
+
+#include <linux/list.h>
+#include <plat/pincfg.h>
+
+#define PIN_LOOKUP(_name, _pins) \
+{ \
+ .name = _name, \
+ .pins = _pins, \
+}
+
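+/*
+ * Declare a struct ux500_pins initialised from a list of pin_cfg_t
+ * values. The compound literal is expanded twice: once for .cfg and
+ * once to compute .num via ARRAY_SIZE().
+ */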
+#define UX500_PINS(name, pins...) \
+struct ux500_pins name = { \
+ .cfg = (pin_cfg_t[]) {pins}, \
+ .num = ARRAY_SIZE(((pin_cfg_t[]) {pins})), \
+}
+
+struct ux500_pins {
+ int usage;
+ int num;
+ pin_cfg_t *cfg;
+};
+
+struct ux500_pin_lookup {
+ struct list_head node;
+ const char *name;
+ struct ux500_pins *pins;
+};
+
+void __init ux500_pins_add(struct ux500_pin_lookup *pl, size_t num);
+struct ux500_pins *ux500_pins_get(const char *name);
+int ux500_pins_enable(struct ux500_pins *pins);
+int ux500_pins_disable(struct ux500_pins *pins);
+void ux500_pins_put(struct ux500_pins *pins);
+int pins_for_u9500(void);
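+
+/*
+ * Typical usage (an illustrative sketch only; the "uart0" name and the
+ * mop500_* identifiers below are hypothetical, not defined by this
+ * patch). A board file declares and registers a named pin group:
+ *
+ *	static UX500_PINS(mop500_pins_uart0,
+ *			  PIN_CFG(0, ALT_A),
+ *			  PIN_CFG(1, ALT_A));
+ *
+ *	static struct ux500_pin_lookup mop500_pin_lookup[] = {
+ *		PIN_LOOKUP("uart0", &mop500_pins_uart0),
+ *	};
+ *
+ *	ux500_pins_add(mop500_pin_lookup, ARRAY_SIZE(mop500_pin_lookup));
+ *
+ * and a driver then resolves the group by name and reference-counts it:
+ *
+ *	struct ux500_pins *pins = ux500_pins_get("uart0");
+ *
+ *	if (pins && !ux500_pins_enable(pins)) {
+ *		... use the peripheral ...
+ *		ux500_pins_disable(pins);
+ *		ux500_pins_put(pins);
+ *	}
+ */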
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/Kconfig b/arch/arm/mach-ux500/pm/Kconfig
new file mode 100644
index 00000000000..195a1d7cad2
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/Kconfig
@@ -0,0 +1,68 @@
+config DBX500_PRCMU_QOS_POWER
+ bool "DBX500 PRCMU power QoS support"
+ depends on (MFD_DB5500_PRCMU || MFD_DB8500_PRCMU)
+ default y
+ help
+ Add support for PRCMU power Quality of Service
+
+config UX500_CONTEXT
+ bool "Context save/restore support for UX500"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM
+ help
+ This is needed for ApSleep and deeper sleep states.
+
+config UX500_PM_PERFORMANCE
+ bool "Performance supervision"
+ depends on DBX500_PRCMU_QOS_POWER
+ default y
+ help
+ Enable supervision of events which may require a boost
+ of platform performance.
+
+config UX500_CONSOLE_UART_GPIO_PIN
+ int "The pin number of the console UART GPIO pin"
+ default 29
+ depends on UX500_SUSPEND_DBG_WAKE_ON_UART || UX500_CPUIDLE_DEBUG
+ help
+ Number of the GPIO pin connected to the console UART RX line.
+
+config UX500_SUSPEND
+ bool "Suspend to mem and standby support"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM && SUSPEND
+ select UX500_CONTEXT
+ help
+ Add support for suspend.
+
+config UX500_SUSPEND_STANDBY
+ bool "Suspend Standby goes to ApSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo standby > /sys/power/state puts the system into ApSleep.
+
+config UX500_SUSPEND_MEM
+ bool "Suspend Mem goes to ApDeepSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo mem > /sys/power/state puts the system into ApDeepSleep;
+ otherwise it behaves the same as echo standby > /sys/power/state.
+
+config UX500_SUSPEND_DBG
+ bool "Suspend debug"
+ depends on UX500_SUSPEND && DEBUG_FS
+ help
+ Add debug support for suspend.
+
+config UX500_SUSPEND_DBG_WAKE_ON_UART
+ bool "Suspend wakes on console UART"
+ depends on UX500_SUSPEND_DBG
+ help
+ Wake up on UART interrupts. Makes it possible for the console to wake up the system.
+
+config UX500_USECASE_GOVERNOR
+ bool "UX500 use-case governor"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && \
+ (CPU_FREQ && CPU_IDLE && HOTPLUG_CPU && \
+ EARLYSUSPEND && UX500_L2X0_PREFETCH_CTRL && PM)
+ default y
+ help
+ Adjusts CPU_IDLE, CPU_FREQ, HOTPLUG_CPU and L2 cache parameters.
diff --git a/arch/arm/mach-ux500/pm/Makefile b/arch/arm/mach-ux500/pm/Makefile
new file mode 100644
index 00000000000..c0af28e5d3e
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/Makefile
@@ -0,0 +1,12 @@
+#
+# Power save related files
+#
+obj-y := pm.o runtime.o
+
+obj-$(CONFIG_DBX500_PRCMU_QOS_POWER) += prcmu-qos-power.o
+obj-$(CONFIG_UX500_CONTEXT) += context.o context_arm.o context-db8500.o context-db5500.o
+obj-$(CONFIG_UX500_CPUIDLE) += timer.o
+obj-$(CONFIG_UX500_SUSPEND) += suspend.o
+obj-$(CONFIG_UX500_SUSPEND_DBG) += suspend_dbg.o
+obj-$(CONFIG_UX500_PM_PERFORMANCE) += performance.o
+obj-$(CONFIG_UX500_USECASE_GOVERNOR) += usecase_gov.o
diff --git a/arch/arm/mach-ux500/pm/context-db5500.c b/arch/arm/mach-ux500/pm/context-db5500.c
new file mode 100644
index 00000000000..8075bd9e113
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db5500.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Sundar Iyer <sundar.iyer@stericsson.com>,
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+/* These registers are DB5500 specific */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x0
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x4
+
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x18
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x1C
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x20
+
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x2C
+
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+#define NODE_HIBW2_DDR_IN_3_PRIORITY 0xC0C
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC30
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC34
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC38
+#define NODE_HIBW2_DDR_IN_3_LIMIT 0xC3C
+
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC40
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+
+#define NODE_ESRAM0_IN_0_LIMIT 0x1024
+#define NODE_ESRAM0_IN_1_LIMIT 0x1028
+#define NODE_ESRAM0_IN_2_LIMIT 0x102C
+#define NODE_ESRAM0_OUT_0_PRIORITY 0x1030
+
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1424
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1428
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x142C
+#define NODE_ESRAM1_2_OUT_0_PRIORITY 0x1430
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1824
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1828
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x182C
+#define NODE_ESRAM3_4_OUT_0_PRIORITY 0x1830
+
+/*
+ * Save ICN (Interconnect or Interconnect nodes) configuration registers
+ * TODO: This can be optimized, for example if we have
+ * a static ICN configuration.
+ */
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[2];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio_reg;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio_reg;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[3];
+ u32 esram_in_lim[3];
+ u32 esram_out_prio_reg;
+
+ u32 esram12_in_prio[3];
+ u32 esram12_in_arb_lim[3];
+ u32 esram12_out_prio_reg;
+
+ u32 esram34_in_prio[3];
+ u32 esram34_in_arb_lim[3];
+ u32 esram34_out_prio;
+} context_icn;
+
+
+void u5500_context_save_icn(void)
+{
+ /* hibw1 */
+ context_icn.hibw1_esram_in_pri[0] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio_reg =
+ readl(context_icn.base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ context_icn.hibw2_esram_in_pri[0] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[3] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ context_icn.hibw2_ddr_in_limit[3] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_3_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio_reg =
+ readl(context_icn.base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ context_icn.esram_in_prio[0] =
+ readl(context_icn.base + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram_in_prio[1] =
+ readl(context_icn.base + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram_in_prio[2] =
+ readl(context_icn.base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ context_icn.esram_in_lim[0] =
+ readl(context_icn.base + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram_in_lim[1] =
+ readl(context_icn.base + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram_in_lim[2] =
+ readl(context_icn.base + NODE_ESRAM0_IN_2_LIMIT);
+
+ context_icn.esram_out_prio_reg =
+ readl(context_icn.base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ context_icn.esram12_in_prio[0] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram12_out_prio_reg =
+ readl(context_icn.base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ context_icn.esram34_in_prio[0] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram34_out_prio =
+ readl(context_icn.base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+}
+
+/*
+ * Restore ICN configuration registers
+ */
+void u5500_context_restore_icn(void)
+{
+
+ /* hibw1 */
+ writel(context_icn.hibw1_esram_in_pri[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel(context_icn.hibw1_esram_in_pri[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ writel(context_icn.hibw1_esram_in0_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in0_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in0_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_esram_in1_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in1_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in1_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_ddr_in_prio[0],
+ context_icn.base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel(context_icn.hibw1_ddr_in_prio[1],
+ context_icn.base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel(context_icn.hibw1_ddr_in_prio[2],
+ context_icn.base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel(context_icn.hibw1_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel(context_icn.hibw1_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel(context_icn.hibw1_ddr_in_limit[2],
+ context_icn.base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel(context_icn.hibw1_ddr_out_prio_reg,
+ context_icn.base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ writel(context_icn.hibw2_esram_in_pri[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel(context_icn.hibw2_esram_in_pri[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel(context_icn.hibw2_esram_in0_arblimit[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel(context_icn.hibw2_esram_in0_arblimit[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel(context_icn.hibw2_esram_in0_arblimit[2],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel(context_icn.hibw2_esram_in1_arblimit[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel(context_icn.hibw2_esram_in1_arblimit[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel(context_icn.hibw2_esram_in1_arblimit[2],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel(context_icn.hibw2_ddr_in_prio[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[2],
+ context_icn.base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[3],
+ context_icn.base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+ writel(context_icn.hibw2_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[2],
+ context_icn.base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[3],
+ context_icn.base + NODE_HIBW2_DDR_IN_3_LIMIT);
+
+ writel(context_icn.hibw2_ddr_out_prio_reg,
+ context_icn.base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ writel(context_icn.esram_in_prio[0],
+ context_icn.base + NODE_ESRAM0_IN_0_PRIORITY);
+ writel(context_icn.esram_in_prio[1],
+ context_icn.base + NODE_ESRAM0_IN_1_PRIORITY);
+ writel(context_icn.esram_in_prio[2],
+ context_icn.base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ writel(context_icn.esram_in_lim[0],
+ context_icn.base + NODE_ESRAM0_IN_0_LIMIT);
+ writel(context_icn.esram_in_lim[1],
+ context_icn.base + NODE_ESRAM0_IN_1_LIMIT);
+ writel(context_icn.esram_in_lim[2],
+ context_icn.base + NODE_ESRAM0_IN_2_LIMIT);
+
+ writel(context_icn.esram_out_prio_reg,
+ context_icn.base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ writel(context_icn.esram12_in_prio[0],
+ context_icn.base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel(context_icn.esram12_in_prio[1],
+ context_icn.base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel(context_icn.esram12_in_prio[2],
+ context_icn.base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ writel(context_icn.esram12_in_arb_lim[0],
+ context_icn.base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[1],
+ context_icn.base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[2],
+ context_icn.base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ writel(context_icn.esram12_out_prio_reg,
+ context_icn.base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ writel(context_icn.esram34_in_prio[0],
+ context_icn.base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel(context_icn.esram34_in_prio[1],
+ context_icn.base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel(context_icn.esram34_in_prio[2],
+ context_icn.base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ writel(context_icn.esram34_in_arb_lim[0],
+ context_icn.base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[1],
+ context_icn.base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[2],
+ context_icn.base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ writel(context_icn.esram34_out_prio,
+ context_icn.base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+
+}
+
+void u5500_context_init(void)
+{
+ context_icn.base = ioremap(U5500_ICN_BASE, SZ_8K);
+}
diff --git a/arch/arm/mach-ux500/pm/context-db8500.c b/arch/arm/mach-ux500/pm/context-db8500.c
new file mode 100644
index 00000000000..3ba73e51a6d
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db8500.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer for ST-Ericsson
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+/*
+ * ST-Interconnect context
+ */
+
+/* priority, bw limiter register offsets */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x00
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x04
+#define NODE_HIBW1_ESRAM_IN_2_PRIORITY 0x08
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x2C
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x30
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x34
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x38
+#define NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT 0x3C
+#define NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT 0x40
+#define NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT 0x44
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC24
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC28
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC2C
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC30
+
+/*
+ * Note: the following addresses are listed in the db8500 design
+ * spec v3.1 and v3.3, table 10, but the addresses given there do
+ * not match the per-register descriptions. The addresses in each
+ * register's description are the correct ones.
+ * NODE_HIBW2_DDR_IN_3_LIMIT is only present in v1.
+ *
+ * Faulty register addresses in table 10:
+ * NODE_HIBW2_DDR_IN_2_LIMIT 0xC38
+ * NODE_HIBW2_DDR_IN_3_LIMIT 0xC3C
+ * NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC40
+ */
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+#define NODE_ESRAM0_IN_3_PRIORITY 0x100C
+#define NODE_ESRAM0_IN_0_LIMIT 0x1030
+#define NODE_ESRAM0_IN_1_LIMIT 0x1034
+#define NODE_ESRAM0_IN_2_LIMIT 0x1038
+#define NODE_ESRAM0_IN_3_LIMIT 0x103C
+/* common */
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+#define NODE_ESRAM1_2_IN_3_PRIORITY 0x140C
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1430
+#define NODE_ESRAM1_2_IN_0_ARB_2_LIMIT 0x1434
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1438
+#define NODE_ESRAM1_2_IN_1_ARB_2_LIMIT 0x143C
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x1440
+#define NODE_ESRAM1_2_IN_2_ARB_2_LIMIT 0x1444
+#define NODE_ESRAM1_2_IN_3_ARB_1_LIMIT 0x1448
+#define NODE_ESRAM1_2_IN_3_ARB_2_LIMIT 0x144C
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+#define NODE_ESRAM3_4_IN_3_PRIORITY 0x180C
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1830
+#define NODE_ESRAM3_4_IN_0_ARB_2_LIMIT 0x1834
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1838
+#define NODE_ESRAM3_4_IN_1_ARB_2_LIMIT 0x183C
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x1840
+#define NODE_ESRAM3_4_IN_2_ARB_2_LIMIT 0x1844
+#define NODE_ESRAM3_4_IN_3_ARB_1_LIMIT 0x1848
+#define NODE_ESRAM3_4_IN_3_ARB_2_LIMIT 0x184C
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[3];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_esram_in2_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[4];
+ u32 esram_in_lim[4];
+ u32 esram0_in_prio[4];
+ u32 esram0_in_lim[4];
+ u32 esram12_in_prio[4];
+ u32 esram12_in_arb_lim[8];
+ u32 esram34_in_prio[4];
+ u32 esram34_in_arb_lim[8];
+} context_icn;
+
+/**
+ * u8500_context_save_icn() - save ICN context
+ *
+ */
+void u8500_context_save_icn(void)
+{
+ void __iomem *b = context_icn.base;
+
+ context_icn.hibw1_esram_in_pri[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ context_icn.hibw1_esram_in_pri[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in2_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in2_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in2_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio =
+ readl_relaxed(b + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ context_icn.hibw2_esram_in_pri[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_1_LIMIT);
+
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_2_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio =
+ readl_relaxed(b + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ context_icn.esram0_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram0_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram0_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM0_IN_2_PRIORITY);
+ context_icn.esram0_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM0_IN_3_PRIORITY);
+
+ context_icn.esram0_in_lim[0] =
+ readl_relaxed(b + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram0_in_lim[1] =
+ readl_relaxed(b + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram0_in_lim[2] =
+ readl_relaxed(b + NODE_ESRAM0_IN_2_LIMIT);
+ context_icn.esram0_in_lim[3] =
+ readl_relaxed(b + NODE_ESRAM0_IN_3_LIMIT);
+
+ context_icn.esram12_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_PRIORITY);
+ context_icn.esram12_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[3] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[4] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[5] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[6] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[7] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ context_icn.esram34_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_PRIORITY);
+ context_icn.esram34_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[3] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[4] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[5] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[6] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[7] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+/**
+ * u8500_context_restore_icn() - restore ICN context
+ *
+ */
+void u8500_context_restore_icn(void)
+{
+ void __iomem *b = context_icn.base;
+
+ writel_relaxed(context_icn.hibw1_esram_in_pri[0],
+ b + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[1],
+ b + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[2],
+ b + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[0],
+ b + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[1],
+ b + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[2],
+ b + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[0],
+ b + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[1],
+ b + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[2],
+ b + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_out_prio,
+ b + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in_pri[0],
+ b + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_esram_in_pri[1],
+ b + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[0],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[1],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[2],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[0],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[1],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[2],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[0],
+ b + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[1],
+ b + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[2],
+ b + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[0],
+ b + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[1],
+ b + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[2],
+ b + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_out_prio,
+ b + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ writel_relaxed(context_icn.esram0_in_prio[0],
+ b + NODE_ESRAM0_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[1],
+ b + NODE_ESRAM0_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[2],
+ b + NODE_ESRAM0_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[3],
+ b + NODE_ESRAM0_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram0_in_lim[0],
+ b + NODE_ESRAM0_IN_0_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[1],
+ b + NODE_ESRAM0_IN_1_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[2],
+ b + NODE_ESRAM0_IN_2_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[3],
+ b + NODE_ESRAM0_IN_3_LIMIT);
+
+ writel_relaxed(context_icn.esram12_in_prio[0],
+ b + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[1],
+ b + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[2],
+ b + NODE_ESRAM1_2_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[3],
+ b + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram12_in_arb_lim[0],
+ b + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[1],
+ b + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[2],
+ b + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[3],
+ b + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[4],
+ b + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[5],
+ b + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[6],
+ b + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[7],
+ b + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ writel_relaxed(context_icn.esram34_in_prio[0],
+ b + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[1],
+ b + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[2],
+ b + NODE_ESRAM3_4_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[3],
+ b + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram34_in_arb_lim[0],
+ b + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[1],
+ b + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[2],
+ b + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[3],
+ b + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[4],
+ b + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[5],
+ b + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[6],
+ b + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[7],
+ b + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+void u8500_context_init(void)
+{
+ context_icn.base = ioremap(U8500_ICN_BASE, SZ_8K);
+}
diff --git a/arch/arm/mach-ux500/pm/context.c b/arch/arm/mach-ux500/pm/context.c
new file mode 100644
index 00000000000..1fa41b5cfca
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/pm.h>
+#include <mach/context.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/smp_twd.h>
+
+#include "scu.h"
+#include "../product.h"
+
+#define GPIO_NUM_BANKS 9
+#define GPIO_NUM_SAVE_REGISTERS 7
+
+/*
+ * TODO:
+ * - Use the "UX500*"-macros instead where possible
+ */
+
+#define U8500_BACKUPRAM_SIZE SZ_64K
+
+#define U8500_PUBLIC_BOOT_ROM_BASE (U8500_BOOT_ROM_BASE + 0x17000)
+#define U5500_PUBLIC_BOOT_ROM_BASE (U5500_BOOT_ROM_BASE + 0x18000)
+
+/*
+ * Special dedicated addresses in backup RAM. The 5500 addresses are identical
+ * to the 8500 ones.
+ */
+#define U8500_EXT_RAM_LOC_BACKUPRAM_ADDR 0x80151FDC
+#define U8500_CPU0_CP15_CR_BACKUPRAM_ADDR 0x80151F80
+#define U8500_CPU1_CP15_CR_BACKUPRAM_ADDR 0x80151FA0
+
+#define U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FD8
+#define U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FE0
+
+#define GIC_DIST_ENABLE_NS 0x0
+
+/* 32 interrupts fit in 4 bytes */
+#define GIC_DIST_ENABLE_SET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 32)
+#define GIC_DIST_ENABLE_SET_CPU_NUM (IRQ_SHPI_START / 32)
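+/*
+ * The *_SPI0 offsets address the per-CPU banked interrupts
+ * (IRQ 0 to IRQ_SHPI_START - 1); the *_SPI32 offsets address the
+ * shared interrupts starting at IRQ_SHPI_START.
+ */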
+#define GIC_DIST_ENABLE_SET_SPI0 GIC_DIST_ENABLE_SET
+#define GIC_DIST_ENABLE_SET_SPI32 (GIC_DIST_ENABLE_SET + IRQ_SHPI_START / 8)
+
+#define GIC_DIST_ENABLE_CLEAR_0 GIC_DIST_ENABLE_CLEAR
+#define GIC_DIST_ENABLE_CLEAR_32 (GIC_DIST_ENABLE_CLEAR + 4)
+#define GIC_DIST_ENABLE_CLEAR_64 (GIC_DIST_ENABLE_CLEAR + 8)
+#define GIC_DIST_ENABLE_CLEAR_96 (GIC_DIST_ENABLE_CLEAR + 12)
+#define GIC_DIST_ENABLE_CLEAR_128 (GIC_DIST_ENABLE_CLEAR + 16)
+
+#define GIC_DIST_PRI_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) / 4)
+#define GIC_DIST_PRI_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_PRI_SPI0 GIC_DIST_PRI
+#define GIC_DIST_PRI_SPI32 (GIC_DIST_PRI + IRQ_SHPI_START)
+
+#define GIC_DIST_SPI_TARGET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 4)
+#define GIC_DIST_SPI_TARGET_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_SPI_TARGET_SPI0 GIC_DIST_TARGET
+#define GIC_DIST_SPI_TARGET_SPI32 (GIC_DIST_TARGET + IRQ_SHPI_START)
+
+/* 16 interrupts per 4 bytes */
+#define GIC_DIST_CONFIG_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) \
+ / 16)
+#define GIC_DIST_CONFIG_CPU_NUM (IRQ_SHPI_START / 16)
+#define GIC_DIST_CONFIG_SPI0 GIC_DIST_CONFIG
+#define GIC_DIST_CONFIG_SPI32 (GIC_DIST_CONFIG + IRQ_SHPI_START / 4)
+
+/* TODO: Move the STM register offsets to a suitable place */
+#define STM_CR_OFFSET 0x00
+#define STM_MMC_OFFSET 0x08
+#define STM_TER_OFFSET 0x10
+
+#define TPIU_PORT_SIZE 0x4
+#define TPIU_TRIGGER_COUNTER 0x104
+#define TPIU_TRIGGER_MULTIPLIER 0x108
+#define TPIU_CURRENT_TEST_PATTERN 0x204
+#define TPIU_TEST_PATTERN_REPEAT 0x208
+#define TPIU_FORMATTER 0x304
+#define TPIU_FORMATTER_SYNC 0x308
+#define TPIU_LOCK_ACCESS_REGISTER 0xFB0
+
+#define TPIU_UNLOCK_CODE 0xc5acce55
+
+#define SCU_FILTER_STARTADDR 0x40
+#define SCU_FILTER_ENDADDR 0x44
+#define SCU_ACCESS_CTRL_SAC 0x50
+
+/*
+ * Periph clock cluster context
+ */
+#define PRCC_BCK_EN 0x00
+#define PRCC_KCK_EN 0x08
+#define PRCC_BCK_STATUS 0x10
+#define PRCC_KCK_STATUS 0x14
+
+/* The context of the Trace Port Interface Unit (TPIU) */
+static struct {
+ void __iomem *base;
+ u32 port_size;
+ u32 trigger_counter;
+ u32 trigger_multiplier;
+ u32 current_test_pattern;
+ u32 test_pattern_repeat;
+ u32 formatter;
+ u32 formatter_sync;
+} context_tpiu;
+
+static struct {
+ void __iomem *base;
+ u32 cr;
+ u32 mmc;
+ u32 ter;
+} context_stm_ape;
+
+struct context_gic_cpu {
+ void __iomem *base;
+ u32 ctrl;
+ u32 primask;
+ u32 binpoint;
+};
+static DEFINE_PER_CPU(struct context_gic_cpu, context_gic_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ns;
+ u32 enable_set[GIC_DIST_ENABLE_SET_COMMON_NUM]; /* IRQ 32 to 160 */
+ u32 priority_level[GIC_DIST_PRI_COMMON_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_COMMON_NUM];
+ u32 config[GIC_DIST_CONFIG_COMMON_NUM];
+} context_gic_dist_common;
+
+struct context_gic_dist_cpu {
+ void __iomem *base;
+ u32 enable_set[GIC_DIST_ENABLE_SET_CPU_NUM]; /* IRQ 0 to 31 */
+ u32 priority_level[GIC_DIST_PRI_CPU_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_CPU_NUM];
+ u32 config[GIC_DIST_CONFIG_CPU_NUM];
+};
+static DEFINE_PER_CPU(struct context_gic_dist_cpu, context_gic_dist_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ctrl;
+ u32 cpu_pwrstatus;
+ u32 inv_all_nonsecure;
+ u32 filter_start_addr;
+ u32 filter_end_addr;
+ u32 access_ctrl_sac;
+} context_scu;
+
+#define UX500_NR_PRCC_BANKS 5
+static struct {
+ void __iomem *base;
+ struct clk *clk;
+ u32 bus_clk;
+ u32 kern_clk;
+} context_prcc[UX500_NR_PRCC_BANKS];
+
+static u32 backup_sram_storage[NR_CPUS] = {
+ IO_ADDRESS(U8500_CPU0_CP15_CR_BACKUPRAM_ADDR),
+ IO_ADDRESS(U8500_CPU1_CP15_CR_BACKUPRAM_ADDR),
+};
+
+static u32 gpio_bankaddr[GPIO_NUM_BANKS] = {IO_ADDRESS(U8500_GPIOBANK0_BASE),
+ IO_ADDRESS(U8500_GPIOBANK1_BASE),
+ IO_ADDRESS(U8500_GPIOBANK2_BASE),
+ IO_ADDRESS(U8500_GPIOBANK3_BASE),
+ IO_ADDRESS(U8500_GPIOBANK4_BASE),
+ IO_ADDRESS(U8500_GPIOBANK5_BASE),
+ IO_ADDRESS(U8500_GPIOBANK6_BASE),
+ IO_ADDRESS(U8500_GPIOBANK7_BASE),
+ IO_ADDRESS(U8500_GPIOBANK8_BASE)
+};
+
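+/*
+ * Per-bank save slots used by context_gpio_save()/restore() below:
+ * [0] AFSLA, [1] AFSLB, [2] PDIS, [3] DIR, [4] DAT, [6] SLPC.
+ */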
+static u32 gpio_save[GPIO_NUM_BANKS][GPIO_NUM_SAVE_REGISTERS];
+
+/*
+ * Stacks and stack pointers
+ */
+static DEFINE_PER_CPU(u32[128], varm_registers_backup_stack);
+static DEFINE_PER_CPU(u32 *, varm_registers_pointer);
+
+static DEFINE_PER_CPU(u32[128], varm_cp15_backup_stack);
+static DEFINE_PER_CPU(u32 *, varm_cp15_pointer);
+
+
+static ATOMIC_NOTIFIER_HEAD(context_ape_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(context_arm_notifier_list);
+
+/*
+ * Register a simple callback for handling vape context save/restore
+ */
+int context_ape_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_ape_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_ape_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_ape_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_unregister);
+
+/*
+ * Register a simple callback for handling varm context save/restore
+ */
+int context_arm_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_arm_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_arm_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_arm_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_unregister);
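+
+/*
+ * Example of hooking the APE chain (an illustrative sketch only;
+ * foo_context_notify, foo_context_nb, foo_save_regs and
+ * foo_restore_regs are hypothetical names, not part of this patch):
+ *
+ *	static int foo_context_notify(struct notifier_block *nb,
+ *				      unsigned long event, void *data)
+ *	{
+ *		if (event == CONTEXT_APE_SAVE)
+ *			foo_save_regs();
+ *		else if (event == CONTEXT_APE_RESTORE)
+ *			foo_restore_regs();
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_context_nb = {
+ *		.notifier_call = foo_context_notify,
+ *	};
+ *
+ *	context_ape_notifier_register(&foo_context_nb);
+ */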
+
+static void save_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ clk_enable(context_prcc[i].clk);
+
+ context_prcc[i].bus_clk =
+ readl(context_prcc[i].base + PRCC_BCK_STATUS);
+ context_prcc[i].kern_clk =
+ readl(context_prcc[i].base + PRCC_KCK_STATUS);
+
+ clk_disable(context_prcc[i].clk);
+ }
+}
+
+static void restore_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ clk_enable(context_prcc[i].clk);
+
+ writel(context_prcc[i].bus_clk,
+ context_prcc[i].base + PRCC_BCK_EN);
+ writel(context_prcc[i].kern_clk,
+ context_prcc[i].base + PRCC_KCK_EN);
+
+ clk_disable(context_prcc[i].clk);
+ }
+}
+
+static void save_stm_ape(void)
+{
+ /*
+ * TODO: Check with PRCMU developers how STM is handled by PRCMU
+ * firmware. According to the DB5500 design spec there is a "flush"
+ * mechanism that is supposed to be used by the PRCMU before power
+ * down; the PRCMU fw might save/restore the following three
+ * registers at the same time.
+ */
+ context_stm_ape.cr = readl(context_stm_ape.base +
+ STM_CR_OFFSET);
+ context_stm_ape.mmc = readl(context_stm_ape.base +
+ STM_MMC_OFFSET);
+ context_stm_ape.ter = readl(context_stm_ape.base +
+ STM_TER_OFFSET);
+}
+
+static void restore_stm_ape(void)
+{
+ writel(context_stm_ape.ter,
+ context_stm_ape.base + STM_TER_OFFSET);
+ writel(context_stm_ape.mmc,
+ context_stm_ape.base + STM_MMC_OFFSET);
+ writel(context_stm_ape.cr,
+ context_stm_ape.base + STM_CR_OFFSET);
+}
+
+static inline bool tpiu_clocked(void)
+{
+ return ux500_jtag_enabled();
+}
+
+/*
+ * Save the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void save_tpiu(void)
+{
+ if (!tpiu_clocked())
+ return;
+
+ context_tpiu.port_size = readl(context_tpiu.base +
+ TPIU_PORT_SIZE);
+ context_tpiu.trigger_counter = readl(context_tpiu.base +
+ TPIU_TRIGGER_COUNTER);
+ context_tpiu.trigger_multiplier = readl(context_tpiu.base +
+ TPIU_TRIGGER_MULTIPLIER);
+ context_tpiu.current_test_pattern = readl(context_tpiu.base +
+ TPIU_CURRENT_TEST_PATTERN);
+ context_tpiu.test_pattern_repeat = readl(context_tpiu.base +
+ TPIU_TEST_PATTERN_REPEAT);
+ context_tpiu.formatter = readl(context_tpiu.base +
+ TPIU_FORMATTER);
+ context_tpiu.formatter_sync = readl(context_tpiu.base +
+ TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Restore the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void restore_tpiu(void)
+{
+ if (!tpiu_clocked())
+ return;
+
+ writel(TPIU_UNLOCK_CODE,
+ context_tpiu.base + TPIU_LOCK_ACCESS_REGISTER);
+
+ writel(context_tpiu.port_size,
+ context_tpiu.base + TPIU_PORT_SIZE);
+ writel(context_tpiu.trigger_counter,
+ context_tpiu.base + TPIU_TRIGGER_COUNTER);
+ writel(context_tpiu.trigger_multiplier,
+ context_tpiu.base + TPIU_TRIGGER_MULTIPLIER);
+ writel(context_tpiu.current_test_pattern,
+ context_tpiu.base + TPIU_CURRENT_TEST_PATTERN);
+ writel(context_tpiu.test_pattern_repeat,
+ context_tpiu.base + TPIU_TEST_PATTERN_REPEAT);
+ writel(context_tpiu.formatter,
+ context_tpiu.base + TPIU_FORMATTER);
+ writel(context_tpiu.formatter_sync,
+ context_tpiu.base + TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Save GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+
+static void save_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ c_gic_cpu->ctrl = readl_relaxed(c_gic_cpu->base + GIC_CPU_CTRL);
+ c_gic_cpu->primask = readl_relaxed(c_gic_cpu->base + GIC_CPU_PRIMASK);
+ c_gic_cpu->binpoint = readl_relaxed(c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Restore GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+static void restore_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ writel_relaxed(c_gic_cpu->ctrl, c_gic_cpu->base + GIC_CPU_CTRL);
+ writel_relaxed(c_gic_cpu->primask, c_gic_cpu->base + GIC_CPU_PRIMASK);
+ writel_relaxed(c_gic_cpu->binpoint, c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Save GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call it.
+ *
+ * Saves the SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+
+static void save_gic_dist_common(void)
+{
+ int i;
+
+ context_gic_dist_common.ns = readl_relaxed(context_gic_dist_common.base
+ + GIC_DIST_ENABLE_NS);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ context_gic_dist_common.enable_set[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ context_gic_dist_common.priority_level[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ context_gic_dist_common.spi_target[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+ context_gic_dist_common.config[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+}
+
+/*
+ * Restore GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call it.
+ *
+ * Restores the SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+static void restore_gic_dist_common(void)
+{
+
+ int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.config[i],
+ context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.spi_target[i],
+ context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.priority_level[i],
+ context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.enable_set[i],
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ writel_relaxed(context_gic_dist_common.ns,
+ context_gic_dist_common.base + GIC_DIST_ENABLE_NS);
+}
+
+
+
+/*
+ * Save GIC Dist CPU registers
+ *
+ * This needs to be called by every CPU, even those which do not call
+ * save_gic_dist_common(). Only the banked GIC registers are saved.
+ */
+static void save_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ c_gic->enable_set[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ c_gic->priority_level[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ c_gic->spi_target[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+ c_gic->config[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+}
+
+/*
+ * Restore GIC Dist CPU registers
+ *
+ * This needs to be called by every CPU, even those which do not call
+ * restore_gic_dist_common(). Only the banked GIC registers are
+ * restored.
+ */
+static void restore_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+
+ int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+ writel_relaxed(c_gic->config[i],
+ c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ writel_relaxed(c_gic->spi_target[i],
+ c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ writel_relaxed(c_gic->priority_level[i],
+ c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ writel_relaxed(c_gic->enable_set[i],
+ c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+}
+
+/*
+ * Disable interrupts that are not necessary
+ * to have turned on during ApDeepSleep.
+ */
+void context_gic_dist_disable_unneeded_irqs(void)
+{
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_0);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_32);
+
+ /* Leave PRCMU IRQ 0 and 1 enabled */
+ writel(0xffff3fff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_64);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_96);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_128);
+
+}
+
+static void save_scu(void)
+{
+ context_scu.ctrl =
+ readl_relaxed(context_scu.base + SCU_CTRL);
+ context_scu.cpu_pwrstatus =
+ readl_relaxed(context_scu.base + SCU_CPU_STATUS);
+ context_scu.inv_all_nonsecure =
+ readl_relaxed(context_scu.base + SCU_INVALIDATE);
+ context_scu.filter_start_addr =
+ readl_relaxed(context_scu.base + SCU_FILTER_STARTADDR);
+ context_scu.filter_end_addr =
+ readl_relaxed(context_scu.base + SCU_FILTER_ENDADDR);
+ context_scu.access_ctrl_sac =
+ readl_relaxed(context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+static void restore_scu(void)
+{
+ writel_relaxed(context_scu.ctrl,
+ context_scu.base + SCU_CTRL);
+ writel_relaxed(context_scu.cpu_pwrstatus,
+ context_scu.base + SCU_CPU_STATUS);
+ writel_relaxed(context_scu.inv_all_nonsecure,
+ context_scu.base + SCU_INVALIDATE);
+ writel_relaxed(context_scu.filter_start_addr,
+ context_scu.base + SCU_FILTER_STARTADDR);
+ writel_relaxed(context_scu.filter_end_addr,
+ context_scu.base + SCU_FILTER_ENDADDR);
+ writel_relaxed(context_scu.access_ctrl_sac,
+ context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+/*
+ * Save VAPE context
+ */
+void context_vape_save(void)
+{
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_SAVE, NULL);
+
+ if (cpu_is_u5500())
+ u5500_context_save_icn();
+ if (cpu_is_u8500())
+ u8500_context_save_icn();
+
+ save_stm_ape();
+
+ save_tpiu();
+
+ save_prcc();
+
+}
+
+/*
+ * Restore VAPE context
+ */
+void context_vape_restore(void)
+{
+ restore_prcc();
+
+ restore_tpiu();
+
+ restore_stm_ape();
+
+ if (cpu_is_u5500())
+ u5500_context_restore_icn();
+ if (cpu_is_u8500())
+ u8500_context_restore_icn();
+
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_RESTORE, NULL);
+}
+
+/*
+ * Save GPIO registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_save(void)
+{
+ int i;
+
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ gpio_save[i][0] = readl(gpio_bankaddr[i] + NMK_GPIO_AFSLA);
+ gpio_save[i][1] = readl(gpio_bankaddr[i] + NMK_GPIO_AFSLB);
+ gpio_save[i][2] = readl(gpio_bankaddr[i] + NMK_GPIO_PDIS);
+ gpio_save[i][3] = readl(gpio_bankaddr[i] + NMK_GPIO_DIR);
+ gpio_save[i][4] = readl(gpio_bankaddr[i] + NMK_GPIO_DAT);
+ gpio_save[i][6] = readl(gpio_bankaddr[i] + NMK_GPIO_SLPC);
+ }
+}
+
+/*
+ * Restore GPIO registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_restore(void)
+{
+ int i;
+ u32 output_state;
+ u32 pull_up;
+ u32 pull_down;
+ u32 pull;
+
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(gpio_save[i][2], gpio_bankaddr[i] + NMK_GPIO_PDIS);
+
+ writel(gpio_save[i][3], gpio_bankaddr[i] + NMK_GPIO_DIR);
+
+ /* Set the high outputs. output_state = GPIO_DIR & GPIO_DAT */
+ output_state = gpio_save[i][3] & gpio_save[i][4];
+ writel(output_state, gpio_bankaddr[i] + NMK_GPIO_DATS);
+
+ /*
+ * Set the low outputs.
+ * output_state = ~(GPIO_DIR & GPIO_DAT) & GPIO_DIR
+ */
+ output_state = ~(gpio_save[i][3] & gpio_save[i][4]) &
+ gpio_save[i][3];
+ writel(output_state, gpio_bankaddr[i] + NMK_GPIO_DATC);
+
+ /*
+ * Restore pull up/down.
+ * Only write pull up/down settings on inputs where
+ * PDIS is not set.
+ * pull = (~GPIO_DIR & ~GPIO_PDIS)
+ */
+ pull = (~gpio_save[i][3] & ~gpio_save[i][2]);
+ nmk_gpio_read_pull(i, &pull_up);
+
+ pull_down = pull & ~pull_up;
+ pull_up = pull & pull_up;
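+		/*
+		 * On the Nomadik GPIO block the pull direction of an input
+		 * pin follows the data register: setting the bit (DATS)
+		 * selects pull-up, clearing it (DATC) selects pull-down.
+		 */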
+ /* Set pull ups */
+ writel(pull_up, gpio_bankaddr[i] + NMK_GPIO_DATS);
+ /* Set pull downs */
+ writel(pull_down, gpio_bankaddr[i] + NMK_GPIO_DATC);
+
+ writel(gpio_save[i][6], gpio_bankaddr[i] + NMK_GPIO_SLPC);
+
+ }
+
+}
+
+/*
+ * Restore GPIO mux registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_restore_mux(void)
+{
+ int i;
+
+ /* Change mux settings */
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(gpio_save[i][0], gpio_bankaddr[i] + NMK_GPIO_AFSLA);
+ writel(gpio_save[i][1], gpio_bankaddr[i] + NMK_GPIO_AFSLB);
+ }
+}
+
+/*
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers (Not done.)
+ * - Set SLPM=0 for the IOs you want to switch. (We assume that all
+ *   SLPM registers are already 0 except for the ones that want to
+ *   have the mux connected in sleep (e.g. modem STM)).
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFSLA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers (Not done.)
+ * - Any spurious wake up event during switch sequence to be ignored
+ * and cleared
+ */
+void context_gpio_mux_safe_switch(bool begin)
+{
+ int i;
+
+ static u32 rwimsc[GPIO_NUM_BANKS];
+ static u32 fwimsc[GPIO_NUM_BANKS];
+
+ if (begin) {
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ /* Save registers */
+ rwimsc[i] = readl(gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ fwimsc[i] = readl(gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+
+ /* Prevent spurious wakeups */
+ writel(0, gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ writel(0, gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+ }
+
+ ux500_pm_prcmu_set_ioforce(true);
+ } else {
+ ux500_pm_prcmu_set_ioforce(false);
+
+ /* Restore wake up settings */
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(rwimsc[i], gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ writel(fwimsc[i], gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+ }
+
+ }
+
+}
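+
+/*
+ * Hypothetical usage (sketch only): bracket the AFSLA/B reprogramming done
+ * by context_gpio_restore_mux() with the safe switch sequence above:
+ *
+ *	context_gpio_mux_safe_switch(true);
+ *	context_gpio_restore_mux();
+ *	context_gpio_mux_safe_switch(false);
+ */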
+
+/*
+ * Save common
+ *
+ * This function must be called once for all cores before going to deep sleep.
+ */
+void context_varm_save_common(void)
+{
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_SAVE, NULL);
+
+ /* Save common parts */
+ save_gic_dist_common();
+ save_scu();
+}
+
+/*
+ * Restore common
+ *
+ * This function must be called once for all cores when waking up from deep
+ * sleep.
+ */
+void context_varm_restore_common(void)
+{
+ /* Restore common parts */
+ restore_scu();
+ restore_gic_dist_common();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_RESTORE, NULL);
+}
+
+/*
+ * Save core
+ *
+ * This function must be called once for each cpu core before going to deep
+ * sleep.
+ */
+void context_varm_save_core(void)
+{
+
+ int cpu = smp_processor_id();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_SAVE, NULL);
+
+ per_cpu(varm_cp15_pointer, cpu) = per_cpu(varm_cp15_backup_stack, cpu);
+
+ /* Save core */
+ twd_save();
+ save_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ save_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ context_save_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+
+}
+
+/*
+ * Restore core
+ *
+ * This function must be called once for each cpu core when waking up from
+ * deep sleep.
+ */
+void context_varm_restore_core(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Restore core */
+ context_restore_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+ restore_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ restore_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ twd_restore();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_RESTORE, NULL);
+
+}
+
+/*
+ * Save CPU registers
+ *
+ * This function saves ARM registers.
+ */
+void context_save_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(varm_registers_pointer, cpu) =
+ per_cpu(varm_registers_backup_stack, cpu);
+ context_save_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * Restore CPU registers
+ *
+ * This function restores ARM registers.
+ */
+void context_restore_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ context_restore_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * This function stores CP15 registers related to cache and MMU
+ * in backup SRAM. It also stores the stack pointer, CPSR
+ * and the return address for the PC in backup SRAM, and
+ * then executes wait for interrupt (WFI).
+ */
+void context_save_to_sram_and_wfi(bool cleanL2cache)
+{
+ int cpu = smp_processor_id();
+
+ context_save_to_sram_and_wfi_internal(backup_sram_storage[cpu],
+ cleanL2cache);
+}
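+
+/*
+ * Hypothetical save sequence before deep sleep (sketch only; the exact
+ * ordering is decided by the caller, e.g. the cpuidle driver):
+ *
+ *	context_vape_save();
+ *	context_varm_save_common();
+ *	context_varm_save_core();
+ *	context_save_cpu_registers();
+ *	context_save_to_sram_and_wfi(false);
+ */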
+
+static int __init context_init(void)
+{
+ int i;
+ void __iomem *ux500_backup_ptr;
+
+ /* allocate backup pointer for RAM data */
+ ux500_backup_ptr = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(U8500_BACKUPRAM_SIZE));
+
+ if (!ux500_backup_ptr) {
+ pr_warning("context: could not allocate backup memory\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The ROM code needs an area to store the backup contents;
+	 * pass the physical address of the backup area to the ROM code.
+ */
+ writel(virt_to_phys(ux500_backup_ptr),
+ IO_ADDRESS(U8500_EXT_RAM_LOC_BACKUPRAM_ADDR));
+
+
+ if (cpu_is_u5500()) {
+ writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ context_tpiu.base = ioremap(U5500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U5500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U5500_SCU_BASE, SZ_4K);
+
+ context_prcc[0].base = ioremap(U5500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U5500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U5500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U5500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U5500_CLKRST6_BASE, SZ_4K);
+
+ context_gic_dist_common.base = ioremap(U5500_GIC_DIST_BASE, SZ_4K);
+ per_cpu(context_gic_cpu, 0).base = ioremap(U5500_GIC_CPU_BASE, SZ_4K);
+ } else if (cpu_is_u8500()) {
+		/* Store the boot ROM logical address in backup RAM, for both CPUs */
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ context_tpiu.base = ioremap(U8500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U8500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U8500_SCU_BASE, SZ_4K);
+
+		/* PERIPH4 is always on, so there is no need to save its PRCC */
+ context_prcc[0].base = ioremap(U8500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U8500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U8500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U8500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U8500_CLKRST6_BASE, SZ_4K);
+
+ context_gic_dist_common.base = ioremap(U8500_GIC_DIST_BASE, SZ_4K);
+ per_cpu(context_gic_cpu, 0).base = ioremap(U8500_GIC_CPU_BASE, SZ_4K);
+ }
+
+ per_cpu(context_gic_dist_cpu, 0).base = context_gic_dist_common.base;
+
+ for (i = 1; i < num_possible_cpus(); i++) {
+ per_cpu(context_gic_cpu, i).base
+ = per_cpu(context_gic_cpu, 0).base;
+ per_cpu(context_gic_dist_cpu, i).base
+ = per_cpu(context_gic_dist_cpu, 0).base;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(context_prcc); i++) {
+ const int clusters[] = {1, 2, 3, 5, 6};
+ char clkname[10];
+
+ snprintf(clkname, sizeof(clkname), "PERIPH%d", clusters[i]);
+
+ context_prcc[i].clk = clk_get_sys(clkname, NULL);
+ BUG_ON(IS_ERR(context_prcc[i].clk));
+ }
+
+ if (cpu_is_u8500()) {
+ u8500_context_init();
+ } else if (cpu_is_u5500()) {
+ u5500_context_init();
+ } else {
+ printk(KERN_ERR "context: unknown hardware!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+subsys_initcall(context_init);
diff --git a/arch/arm/mach-ux500/pm/context_arm.S b/arch/arm/mach-ux500/pm/context_arm.S
new file mode 100644
index 00000000000..55e2accc85f
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context_arm.S
@@ -0,0 +1,385 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/linkage.h>
+#include <mach/hardware.h>
+#include <asm/hardware/cache-l2x0.h>
+
+/*
+ * Save and increment macro
+ */
+.macro SAVE_AND_INCREMENT FROM_REG TO_REG
+ str \FROM_REG, [\TO_REG], #+4
+.endm
+
+/*
+ * Decrement and restore macro
+ */
+.macro DECREMENT_AND_RESTORE FROM_REG TO_REG
+ ldr \TO_REG, [\FROM_REG, #-4]!
+.endm
+
+/*
+ * Save ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * + {sp, lr}^
+ * + cpsr
+ * + {r3, r8-r14} (FIQ mode: r3=spsr)
+ * + {r3, r13, r14} (IRQ mode: r3=spsr)
+ * + {r3, r13, r14} (abort mode: r3=spsr)
+ * + {r3, r13, r14} (undef mode: r3=spsr)
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_save_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ stmia r1, {sp, lr}^ @ Store user mode sp and lr
+ @ registers
+ add r1, r1, #8 @ Update backup pointer (not
+ @ done in previous instruction)
+
+ mrs r2, cpsr @ Get CPSR
+ SAVE_AND_INCREMENT r2 r1 @ Save CPSR register
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ @ The suffix to CPSR refers to which field(s) of the CPSR is
+	@ referenced (you can specify one or more). Defined fields are:
+ @
+ @ c - control
+ @ x - extension
+ @ s - status
+ @ f - flags
+
+ orr r3, r2, #0x11 @ Save FIQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r8-r14}
+
+ orr r3, r2, #0x12 @ Save IRQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r13, r14}
+
+ orr r3, r2, #0x17 @ Save abort mode registers +
+ @ common mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r13, r14}
+
+ orr r3, r2, #0x1B @ Save undef mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r13, r14}
+
+ orr r3, r2, #0x13 @ Return to supervisor mode
+ msr cpsr_cxsf, r3
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * - {r3, r13, r14} (undef mode: spsr=r3)
+ * - {r3, r13, r14} (abort mode: spsr=r3)
+ * - {r3, r13, r14} (IRQ mode: spsr=r3)
+ * - {r3, r8-r14} (FIQ mode: spsr=r3)
+ * - cpsr
+ * - {sp, lr}^
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_restore_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrs r2, cpsr @ Get CPSR
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ orr r3, r2, #0x1b @ Restore undef mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r13, r14}
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x17 @ Restore abort mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r13, r14}
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x12 @ Restore IRQ mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r13, r14}
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x11 @ Restore FIQ mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r8-r14}
+ msr spsr_cxsf, r3
+
+ DECREMENT_AND_RESTORE r1 r3 @ Restore cpsr register
+ msr cpsr_cxsf, r3
+
+ ldmdb r1, {sp, lr}^ @ Restore sp and lr registers
+ sub r1, r1, #8 @ Update backup pointer (not
+ @ done in previous instruction)
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Save CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * TTBR0, TTBR1, TTBCR, DACR CP15 registers are restored by boot ROM from SRAM.
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_cp15_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack (r3 is saved due
+ @ to 8 byte aligned stack)
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrc p15, 0, r2, c12, c0, 0 @ Read Non-secure Vector Base
+ @ Address Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c13, c0, 1 @ Read Context ID Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 2 @ Read Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 3 @ Read Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible.
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 4 @ Read Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only.
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c9, c12, 0 @ Read PMNC Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 1 @ Read PMCNTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 5 @ Read PMSELR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 0 @ Read PMCCNTR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 1 @ Read PMXEVTYPER Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 0 @ Read PMUSERENR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 1 @ Read PMINTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 2 @ Read PMINTENCLR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c1, c0, 2 @ Read CPACR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_restore_cp15_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack (r3 is saved due
+ @ to 8 byte aligned stack)
+ ldr r1, [r0] @ Read backup stack pointer
+
+ DECREMENT_AND_RESTORE r1 r2 @ Write CPACR register
+ mcr p15, 0, r2, c1, c0, 2
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 2 @ Write PMINTENCLR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 1 @ Write PMINTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 0 @ Write PMUSERENR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 1 @ Write PMXEVTYPER Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 0 @ Write PMCCNTR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 5 @ Write PMSELR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 1 @ Write PMCNTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 0 @ Write PMNC Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 4 @ Write Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 3 @ Write Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 2 @ Write Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 1 @ Write Context ID Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c12, c0, 0 @ Write Non-secure Vector Base
+ @ Address Register
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+/*
+ * L1 cache clean function. Commit 'dirty' data from L1
+ * to L2 cache.
+ *
+ * r0, r1, r2, used locally
+ *
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_clean_l1_cache_all)
+
+	mov	r0, #0			@ switch to cache level 0
+ @ (L1 cache)
+ mcr p15, 2, r0, c0, c0, 0 @ select current cache level
+ @ in cssr
+
+ dmb
+ mov r1, #0 @ r1 = way index
+wayLoopL1clean:
+ mov r0, #0 @ r0 = line index
+lineLoopL1clean:
+ mov r2, r1, lsl #30 @ TODO: OK to hard-code
+ @ SoC-specific L1 cache details?
+ add r2, r0, lsl #5
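+					@ r2 = (way << 30) | (set << 5),
+					@ i.e. the set/way format for a
+					@ 4-way L1 cache with 32-byte
+					@ lines (Cortex-A9 assumption)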
+ mcr p15, 0, r2, c7, c10, 2 @ Clean cache by set/way
+ add r0, r0, #1
+ cmp r0, #256 @ TODO: Ok with hard-coded
+ @ set/way sizes or do we have to
+ @ read them from ARM regs? Is it
+ @ set correctly in silicon?
+ bne lineLoopL1clean
+ add r1, r1, #1
+ cmp r1, #4 @ TODO: Ditto, sizes...
+ bne wayLoopL1clean
+
+ dsb
+ isb
+ mov pc, lr
+
+ENDPROC(context_clean_l1_cache_all)
+
+/*
+ * Last saves to backup RAM, cache clean and WFI
+ *
+ * r0 = address of the backup_sram_storage base
+ * r1 = indicate whether also L2 cache should be cleaned
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_to_sram_and_wfi_internal)
+
+ stmfd sp!, {r2-r12, lr} @ save on stack.
+
+ mrc p15, 0, r2, c1, c0, 0 @ read cp15 system control
+ @ register
+ str r2, [r0, #0x00]
+ mrc p15, 0, r2, c2, c0, 0 @ read cp15 ttb0 register
+ str r2, [r0, #0x04]
+ mrc p15, 0, r2, c2, c0, 1 @ read cp15 ttb1 register
+ str r2, [r0, #0x08]
+ mrc p15, 0, r2, c2, c0, 2 @ read cp15 ttb control register
+ str r2, [r0, #0x0C]
+ mrc p15, 0, r2, c3, c0, 0 @ read domain access control
+ @ register
+ str r2, [r0, #0x10]
+
+ ldr r2, =return_here
+ str r2, [r0, #0x14] @ save program counter restore
+ @ value to backup_sram_storage
+ mrs r2, cpsr
+ str r2, [r0, #0x18] @ save cpsr to
+ @ backup_sram_storage
+ str sp, [r0, #0x1c] @ save sp to backup_sram_storage
+
+	mov	r4, r1			@ Set r4 = cleanL2cache, r1
+					@ will be destroyed by
+					@ context_clean_l1_cache_all
+
+ bl context_clean_l1_cache_all @ Commit all dirty data in L1
+ @ cache to L2 without
+ @ invalidating
+
+ dsb @ data synchronization barrier
+ isb @ instruction synchronization
+ @ barrier
+ wfi @ wait for interrupt
+
+return_here: @ both cores return here
+					@ now we are out of deep sleep
+ @ with all the context lost
+ @ except pc, sp and cpsr
+
+ ldmfd sp!, {r2-r12, pc} @ restore from stack
+
diff --git a/arch/arm/mach-ux500/pm/performance.c b/arch/arm/mach-ux500/pm/performance.c
new file mode 100644
index 00000000000..b09840274a0
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/performance.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Johan Rudholm <johan.rudholm@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/genhd.h>
+#include <linux/major.h>
+#include <linux/cdev.h>
+#include <linux/kernel_stat.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpu.h>
+#include <linux/pm_qos.h>
+
+#include <mach/irqs.h>
+
+#define WLAN_PROBE_DELAY 3000 /* 3 seconds */
+#define WLAN_LIMIT	(3000/3) /* More than 1000 irqs per 3 s probe period */
+
+/*
+ * MMC TODO:
+ * o Develop a more power-aware algorithm
+ * o Make the parameters visible through debugfs
+ * o Get the value of CONFIG_MMC_BLOCK_MINORS in runtime instead, since
+ * it may be altered by drivers/mmc/card/block.c
+ */
+
+/* Sample reads and writes every n ms */
+#define PERF_MMC_PROBE_DELAY 1000
+/* Read threshold, sectors/second */
+#define PERF_MMC_LIMIT_READ 10240
+/* Write threshold, sectors/second */
+#define PERF_MMC_LIMIT_WRITE 8192
+/* Nr of MMC devices */
+#define PERF_MMC_HOSTS 8
+
+/*
+ * Rescan for new MMC devices every
+ * PERF_MMC_PROBE_DELAY * PERF_MMC_RESCAN_CYCLES ms
+ */
+#define PERF_MMC_RESCAN_CYCLES 10
+
+#ifdef CONFIG_MMC_BLOCK
+static struct delayed_work work_mmc;
+#endif
+
+static struct delayed_work work_wlan_workaround;
+static struct pm_qos_request wlan_pm_qos_latency;
+static bool wlan_pm_qos_is_latency_0;
+
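+/*
+ * Periodically sample the number of SDMMC1 (WLAN SDIO) interrupts and raise
+ * the ARM OPP requirement plus a zero CPU DMA latency request while the
+ * interrupt rate indicates heavy WLAN traffic; relax both again when the
+ * rate drops.
+ */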
+static void wlan_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_SDMMC1, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > WLAN_LIMIT) {
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "wlan", 125);
+ if (!wlan_pm_qos_is_latency_0) {
+ /*
+ * The wake up latency is set to 0 to prevent
+ * the system from going to sleep. This improves
+ * the wlan throughput in DMA mode.
+ * The wake up latency from sleep adds ~5% overhead
+ * for TX in some cases.
+ * This change doesn't increase performance for wlan
+ * PIO since the CPU usage prevents sleep in this mode.
+ */
+ pm_qos_add_request(&wlan_pm_qos_latency,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+ wlan_pm_qos_is_latency_0 = true;
+ }
+ } else {
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "wlan", 25);
+ if (wlan_pm_qos_is_latency_0) {
+ pm_qos_remove_request(&wlan_pm_qos_latency);
+ wlan_pm_qos_is_latency_0 = false;
+ }
+ }
+
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &work_wlan_workaround,
+ msecs_to_jiffies(WLAN_PROBE_DELAY));
+}
+
+#ifdef CONFIG_MMC_BLOCK
+/*
+ * Loop through every CONFIG_MMC_BLOCK_MINORS'th minor device for
+ * MMC_BLOCK_MAJOR and get the struct gendisk for each device.
+ * Populates mmc_disks[] and returns the number of disks found.
+ */
+static int scan_mmc_devices(struct gendisk *mmc_disks[])
+{
+ dev_t devnr;
+ int i, j = 0, part;
+ struct gendisk *mmc_devices[256 / CONFIG_MMC_BLOCK_MINORS];
+
+ memset(&mmc_devices, 0, sizeof(mmc_devices));
+
+ for (i = 0; i * CONFIG_MMC_BLOCK_MINORS < 256; i++) {
+ devnr = MKDEV(MMC_BLOCK_MAJOR, i * CONFIG_MMC_BLOCK_MINORS);
+ mmc_devices[i] = get_gendisk(devnr, &part);
+
+ /* Invalid capacity of device, do not add to list */
+ if (!mmc_devices[i] || !get_capacity(mmc_devices[i]))
+ continue;
+
+ mmc_disks[j] = mmc_devices[i];
+ j++;
+
+ if (j == PERF_MMC_HOSTS)
+ break;
+ }
+
+ return j;
+}
+
+/*
+ * Sample sectors read and written to any MMC devices, update PRCMU
+ * qos requirement
+ */
+static void mmc_load(struct work_struct *work)
+{
+ static unsigned long long old_sectors_read[PERF_MMC_HOSTS];
+ static unsigned long long old_sectors_written[PERF_MMC_HOSTS];
+ static struct gendisk *mmc_disks[PERF_MMC_HOSTS];
+ static int cycle, nrdisk;
+ static bool old_mode;
+ unsigned long long sectors;
+ bool new_mode = false;
+ int i;
+
+ if (!cycle) {
+ memset(&mmc_disks, 0, sizeof(mmc_disks));
+ nrdisk = scan_mmc_devices(mmc_disks);
+ cycle = PERF_MMC_RESCAN_CYCLES;
+ }
+ cycle--;
+
+ for (i = 0; i < nrdisk; i++) {
+ sectors = part_stat_read(&(mmc_disks[i]->part0),
+ sectors[READ]);
+
+ if (old_sectors_read[i] &&
+ sectors > old_sectors_read[i] &&
+ (sectors - old_sectors_read[i]) >
+ PERF_MMC_LIMIT_READ)
+ new_mode = true;
+
+ old_sectors_read[i] = sectors;
+ sectors = part_stat_read(&(mmc_disks[i]->part0),
+ sectors[WRITE]);
+
+ if (old_sectors_written[i] &&
+ sectors > old_sectors_written[i] &&
+ (sectors - old_sectors_written[i]) >
+ PERF_MMC_LIMIT_WRITE)
+ new_mode = true;
+
+ old_sectors_written[i] = sectors;
+ }
+
+ if (!old_mode && new_mode)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "mmc", 125);
+
+ if (old_mode && !new_mode)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "mmc", 25);
+
+ old_mode = new_mode;
+
+ schedule_delayed_work(&work_mmc,
+ msecs_to_jiffies(PERF_MMC_PROBE_DELAY));
+
+}
+#endif /* CONFIG_MMC_BLOCK */
+
+static int __init performance_register(void)
+{
+ int ret;
+
+#ifdef CONFIG_MMC_BLOCK
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "mmc", 25);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_mmc, mmc_load);
+
+	ret = schedule_delayed_work(&work_mmc,
+				    msecs_to_jiffies(PERF_MMC_PROBE_DELAY));
+	if (!ret) {
+		pr_err("ux500: performance: failed to schedule mmc work\n");
+		ret = -EBUSY;
+		goto out;
+	}
+#endif
+
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "wlan", 25);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_wlan_workaround,
+ wlan_load);
+
+	ret = schedule_delayed_work_on(0,
+				       &work_wlan_workaround,
+				       msecs_to_jiffies(WLAN_PROBE_DELAY));
+	if (!ret) {
+		pr_err("ux500: performance: failed to schedule wlan work\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+late_initcall(performance_register);
diff --git a/arch/arm/mach-ux500/pm/pm.c b/arch/arm/mach-ux500/pm/pm.c
new file mode 100644
index 00000000000..d986d1024d0
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/pm.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/processor.h>
+
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+#define STABILIZATION_TIME 30 /* us */
+#define GIC_FREEZE_DELAY 1 /* us */
+
+#define PRCM_ARM_WFI_STANDBY_CPU0_WFI 0x8
+#define PRCM_ARM_WFI_STANDBY_CPU1_WFI 0x10
+
+/* Dual A9 core interrupt management unit registers */
+#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328)
+#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1
+#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c)
+
+#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c)
+#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120)
+#define PRCM_ARMITMSK95TO64 (_PRCMU_BASE + 0x124)
+#define PRCM_ARMITMSK127TO96 (_PRCMU_BASE + 0x128)
+#define PRCM_POWER_STATE_VAL (_PRCMU_BASE + 0x25C)
+#define PRCM_ARMITVAL31TO0 (_PRCMU_BASE + 0x260)
+#define PRCM_ARMITVAL63TO32 (_PRCMU_BASE + 0x264)
+#define PRCM_ARMITVAL95TO64 (_PRCMU_BASE + 0x268)
+#define PRCM_ARMITVAL127TO96 (_PRCMU_BASE + 0x26C)
+
+/* ARM WFI Standby signal register */
+#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
+
+/* IO force */
+#define PRCM_IOCR (_PRCMU_BASE + 0x310)
+#define PRCM_IOCR_IOFORCE 0x1
+
+static u32 u8500_gpio_banks[] = {U8500_GPIOBANK0_BASE,
+ U8500_GPIOBANK1_BASE,
+ U8500_GPIOBANK2_BASE,
+ U8500_GPIOBANK3_BASE,
+ U8500_GPIOBANK4_BASE,
+ U8500_GPIOBANK5_BASE,
+ U8500_GPIOBANK6_BASE,
+ U8500_GPIOBANK7_BASE,
+ U8500_GPIOBANK8_BASE};
+
+static u32 u5500_gpio_banks[] = {U5500_GPIOBANK0_BASE,
+ U5500_GPIOBANK1_BASE,
+ U5500_GPIOBANK2_BASE,
+ U5500_GPIOBANK3_BASE,
+ U5500_GPIOBANK4_BASE,
+ U5500_GPIOBANK5_BASE,
+ U5500_GPIOBANK6_BASE,
+ U5500_GPIOBANK7_BASE};
+
+static u32 ux500_gpio_wks[ARRAY_SIZE(u8500_gpio_banks)];
+
+inline int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on)
+{
+ return 0;
+}
+
+/* Decouple GIC from the interrupt bus */
+void ux500_pm_gic_decouple(void)
+{
+ writel(readl(PRCM_A9_MASK_REQ) | PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ);
+
+ while (!readl(PRCM_A9_MASK_REQ))
+ cpu_relax();
+
+ /* TODO: Use the ack bit when possible */
+ udelay(GIC_FREEZE_DELAY); /* Wait for the GIC to freeze */
+}
+
+/* Recouple GIC with the interrupt bus */
+void ux500_pm_gic_recouple(void)
+{
+ writel((readl(PRCM_A9_MASK_REQ) & ~PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ),
+ PRCM_A9_MASK_REQ);
+
+ /* TODO: Use the ack bit when possible */
+}
+
+#define GIC_NUMBER_REGS 5
+bool ux500_pm_gic_pending_interrupt(void)
+{
+ u32 pr; /* Pending register */
+ u32 er; /* Enable register */
+ int i;
+
+ /* 5 registers. STI & PPI not skipped */
+ for (i = 0; i < GIC_NUMBER_REGS; i++) {
+
+ pr = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_PENDING_SET + i * 4);
+ er = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + i * 4);
+
+ if (pr & er)
+ return true; /* There is a pending interrupt */
+ }
+ return false;
+}
+
+#define GIC_NUMBER_SPI_REGS 4
+bool ux500_pm_prcmu_pending_interrupt(void)
+{
+ u32 it;
+ u32 im;
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* There are 4 registers */
+
+ it = readl(PRCM_ARMITVAL31TO0 + i * 4);
+ im = readl(PRCM_ARMITMSK31TO0 + i * 4);
+
+ if (it & im)
+ return true; /* There is a pending interrupt */
+ }
+
+ return false;
+}
+
+void ux500_pm_prcmu_set_ioforce(bool enable)
+{
+ if (enable)
+ writel(readl(PRCM_IOCR) | PRCM_IOCR_IOFORCE, PRCM_IOCR);
+ else
+ writel(readl(PRCM_IOCR) & ~PRCM_IOCR_IOFORCE, PRCM_IOCR);
+}
+
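+/*
+ * Copy the GIC SPI enable bits into the PRCMU wake-up masks, so that the
+ * PRCMU can wake the system on the same interrupts while the GIC is
+ * decoupled from the interrupt bus.
+ */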
+void ux500_pm_prcmu_copy_gic_settings(void)
+{
+ u32 er; /* Enable register */
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* 4*32 SPI interrupts */
+ /* +1 due to skip STI and PPI */
+ er = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + (i + 1) * 4);
+ writel(er, PRCM_ARMITMSK31TO0 + i * 4);
+ }
+}
+
+void ux500_pm_gpio_save_wake_up_status(void)
+{
+ int num_banks;
+ u32 *banks;
+ int i;
+
+ if (cpu_is_u5500()) {
+ num_banks = ARRAY_SIZE(u5500_gpio_banks);
+ banks = u5500_gpio_banks;
+ } else {
+ num_banks = ARRAY_SIZE(u8500_gpio_banks);
+ banks = u8500_gpio_banks;
+ }
+
+ nmk_gpio_clocks_enable();
+
+ for (i = 0; i < num_banks; i++)
+ ux500_gpio_wks[i] = readl(__io_address(banks[i]) + NMK_GPIO_WKS);
+
+ nmk_gpio_clocks_disable();
+}
+
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_num)
+{
+ if (WARN_ON(cpu_is_u5500() && bank_num >=
+ ARRAY_SIZE(u5500_gpio_banks)))
+ return 0;
+
+ if (WARN_ON(cpu_is_u8500() && bank_num >=
+ ARRAY_SIZE(u8500_gpio_banks)))
+ return 0;
+
+ return ux500_gpio_wks[bank_num];
+}
+
+/* Check if the other CPU is in WFI */
+bool ux500_pm_other_cpu_wfi(void)
+{
+ if (smp_processor_id()) {
+ /* We are CPU 1 => check if CPU0 is in WFI */
+ if (readl(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU0_WFI)
+ return true;
+ } else {
+ /* We are CPU 0 => check if CPU1 is in WFI */
+ if (readl(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU1_WFI)
+ return true;
+ }
+
+ return false;
+}
+
+/* PRCM_ACK_MB0_AP_PWRSTTR_STATUS */
+#define DB8500_PRCMU_STATUS_REGISTER 0x801b8e08
+#define DB5500_PRCMU_STATUS_REGISTER 0x80168f38
+
+enum prcmu_idle_stat ux500_pm_prcmu_idle_stat(void)
+{
+ u32 val;
+ void __iomem *prcmu_status_reg;
+
+ if (cpu_is_u8500())
+ prcmu_status_reg = __io_address(DB8500_PRCMU_STATUS_REGISTER);
+ else if (cpu_is_u5500())
+ prcmu_status_reg = __io_address(DB5500_PRCMU_STATUS_REGISTER);
+ else
+ ux500_unknown_soc();
+
+ val = readl(prcmu_status_reg) & 0xff;
+
+ return (enum prcmu_idle_stat)val;
+}
diff --git a/arch/arm/mach-ux500/pm/prcmu-qos-power.c b/arch/arm/mach-ux500/pm/prcmu-qos-power.c
new file mode 100644
index 00000000000..e095eed4b70
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/prcmu-qos-power.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Martin Persson
+ * Per Fransson <per.xx.fransson@stericsson.com>
+ *
+ * Quality of Service for the U8500 PRCM Unit interface driver
+ *
+ * Strongly influenced by kernel/pm_qos_params.c.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/cpufreq.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpufreq-dbx500.h>
+
+#include <mach/prcmu-debug.h>
+
+#define ARM_THRESHOLD_FREQ (400000)
+
+static int qos_delayed_cpufreq_notifier(struct notifier_block *,
+ unsigned long, void *);
+
+static s32 cpufreq_requirement_queued;
+static s32 cpufreq_requirement_set;
+
+/*
+ * locking rule: all changes to requirements or prcmu_qos_object list
+ * and prcmu_qos_objects need to happen with prcmu_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+
+struct prcmu_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice prcmu_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 force_value;
+ atomic_t target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct prcmu_qos_object null_qos;
+static BLOCKING_NOTIFIER_HEAD(prcmu_ape_opp_notifier);
+static BLOCKING_NOTIFIER_HEAD(prcmu_ddr_opp_notifier);
+
+static struct prcmu_qos_object ape_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(ape_opp_qos.requirements.list)
+ },
+ .notifiers = &prcmu_ape_opp_notifier,
+ .name = "ape_opp",
+ /* Target value in % APE OPP */
+ .default_value = 50,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(0),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object ddr_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(ddr_opp_qos.requirements.list)
+ },
+ .notifiers = &prcmu_ddr_opp_notifier,
+ .name = "ddr_opp",
+ /* Target value in % DDR OPP */
+ .default_value = 25,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(0),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object arm_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(arm_opp_qos.requirements.list)
+ },
+ /*
+ * No notifier on ARM opp qos request, since this won't actually
+ * do anything, except changing limits for cpufreq
+ */
+ .name = "arm_opp",
+ /* Target value in % ARM OPP, note can be 125% */
+ .default_value = 25,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(0),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object *prcmu_qos_array[] = {
+ &null_qos,
+ &ape_opp_qos,
+ &ddr_opp_qos,
+ &arm_opp_qos,
+};
+
+static DEFINE_MUTEX(prcmu_qos_mutex);
+static DEFINE_SPINLOCK(prcmu_qos_lock);
+
+static bool ape_opp_forced_to_50_partly_25;
+
+static unsigned long cpufreq_opp_delay = HZ / 5;
+
+unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
+{
+ return cpufreq_opp_delay;
+}
+
+static struct notifier_block qos_delayed_cpufreq_notifier_block = {
+ .notifier_call = qos_delayed_cpufreq_notifier,
+};
+
+void prcmu_qos_set_cpufreq_opp_delay(unsigned long n)
+{
+ if (n == 0) {
+ cpufreq_unregister_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+ cpufreq_requirement_queued = PRCMU_QOS_DEFAULT_VALUE;
+ } else if (cpufreq_opp_delay != 0) {
+ cpufreq_register_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ cpufreq_opp_delay = n;
+}
+#ifdef CONFIG_CPU_FREQ
+static void update_cpu_limits(s32 extreme_value)
+{
+ int cpu;
+ struct cpufreq_policy policy;
+ int ret;
+ int min_freq, max_freq;
+
+ for_each_online_cpu(cpu) {
+ ret = cpufreq_get_policy(&policy, cpu);
+ if (ret) {
+ pr_err("prcmu qos: get cpufreq policy failed (cpu%d)\n",
+ cpu);
+ continue;
+ }
+
+ ret = dbx500_cpufreq_get_limits(cpu, extreme_value,
+ &min_freq, &max_freq);
+ if (ret)
+ continue;
+ /*
+ * cpufreq fw does not allow frequency change if
+ * "current min freq" > "new max freq" or
+ * "current max freq" < "new min freq".
+ * Thus the intermediate steps below.
+ */
+ if (policy.min > max_freq) {
+ ret = cpufreq_update_freq(cpu, min_freq, policy.max);
+ if (ret)
+ pr_err("prcmu qos: update min cpufreq failed (1)\n");
+ }
+ if (policy.max < min_freq) {
+ ret = cpufreq_update_freq(cpu, policy.min, max_freq);
+ if (ret)
+ pr_err("prcmu qos: update max cpufreq failed (2)\n");
+ }
+
+ ret = cpufreq_update_freq(cpu, min_freq, max_freq);
+ if (ret)
+ pr_err("prcmu qos: update max cpufreq failed (3)\n");
+ }
+
+}
+#else
+static inline void update_cpu_limits(s32 extreme_value) { }
+#endif
+/* static helper function */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
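+/*
+ * Recompute the aggregate requirement for @target (or use the force value
+ * if one is set) and apply it: notify listeners and program the new DDR or
+ * APE OPP, or adjust the cpufreq limits for the ARM OPP class.
+ */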
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ bool update = false;
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ extreme_value = prcmu_qos_array[target]->default_value;
+
+ if (prcmu_qos_array[target]->force_value != 0) {
+ extreme_value = prcmu_qos_array[target]->force_value;
+ update = true;
+ } else {
+ list_for_each_entry(node,
+ &prcmu_qos_array[target]->requirements.list,
+ list) {
+ extreme_value = prcmu_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (atomic_read(&prcmu_qos_array[target]->target_value)
+ != extreme_value) {
+ update = true;
+ atomic_set(&prcmu_qos_array[target]->target_value,
+ extreme_value);
+ pr_debug("prcmu qos: new target for qos %d is %d\n",
+ target, atomic_read(
+ &prcmu_qos_array[target]->target_value
+ ));
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+
+ if (!update)
+ goto unlock_and_return;
+
+ if (prcmu_qos_array[target]->notifiers)
+ blocking_notifier_call_chain(prcmu_qos_array[target]->notifiers,
+ (unsigned long)extreme_value,
+ NULL);
+ switch (target) {
+ case PRCMU_QOS_DDR_OPP:
+ switch (extreme_value) {
+ case 50:
+ op = DDR_50_OPP;
+ pr_debug("prcmu qos: set ddr opp to 50%%\n");
+ break;
+ case 100:
+ op = DDR_100_OPP;
+ pr_debug("prcmu qos: set ddr opp to 100%%\n");
+ break;
+ case 25:
+ /* 25% DDR OPP is not supported on 5500 */
+ if (!cpu_is_u5500()) {
+ op = DDR_25_OPP;
+ pr_debug("prcmu qos: set ddr opp to 25%%\n");
+ break;
+ }
+ default:
+			pr_err("prcmu qos: Incorrect ddr target value (%d)\n",
+			       extreme_value);
+ goto unlock_and_return;
+ }
+ prcmu_set_ddr_opp(op);
+ prcmu_debug_ddr_opp_log(op);
+ break;
+ case PRCMU_QOS_APE_OPP:
+ switch (extreme_value) {
+ case 50:
+ op = APE_50_OPP;
+ pr_debug("prcmu qos: set ape opp to 50%%\n");
+ break;
+ case 100:
+ op = APE_100_OPP;
+ pr_debug("prcmu qos: set ape opp to 100%%\n");
+ break;
+ default:
+			pr_err("prcmu qos: Incorrect ape target value (%d)\n",
+			       extreme_value);
+ goto unlock_and_return;
+ }
+
+ if (!ape_opp_forced_to_50_partly_25)
+ (void)prcmu_set_ape_opp(op);
+ prcmu_debug_ape_opp_log(op);
+ break;
+ case PRCMU_QOS_ARM_OPP:
+ {
+ mutex_unlock(&prcmu_qos_mutex);
+ /*
+ * We can't hold the mutex since changing cpufreq
+		 * will trigger a prcmu fw callback.
+ */
+ update_cpu_limits(extreme_value);
+ /* Return since the lock is unlocked */
+ return;
+
+ break;
+ }
+ default:
+ pr_err("prcmu qos: Incorrect target\n");
+ break;
+ }
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
+void prcmu_qos_force_opp(int prcmu_qos_class, s32 i)
+{
+ prcmu_qos_array[prcmu_qos_class]->force_value = i;
+ update_target(prcmu_qos_class);
+}
+
+void prcmu_qos_voice_call_override(bool enable)
+{
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ ape_opp_forced_to_50_partly_25 = enable;
+
+ if (enable) {
+ (void)prcmu_set_ape_opp(APE_50_PARTLY_25_OPP);
+ goto unlock_and_return;
+ }
+
+ /* Disable: set the OPP according to the current target value. */
+ switch (atomic_read(
+ &prcmu_qos_array[PRCMU_QOS_APE_OPP]->target_value)) {
+ case 50:
+ op = APE_50_OPP;
+ break;
+ case 100:
+ op = APE_100_OPP;
+ break;
+ default:
+ goto unlock_and_return;
+ }
+
+ (void)prcmu_set_ape_opp(op);
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
+/**
+ * prcmu_qos_requirement - returns current prcmu qos expectation
+ * @prcmu_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int prcmu_qos_requirement(int prcmu_qos_class)
+{
+ return atomic_read(&prcmu_qos_array[prcmu_qos_class]->target_value);
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_requirement);
+
+/**
+ * prcmu_qos_add_requirement - inserts new qos request into the list
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the prcmu_qos_class list of requested
+ * qos performance characteristics. It recomputes the aggregate QoS
+ * expectations for the prcmu_qos_class of parameters.
+ */
+int prcmu_qos_add_requirement(int prcmu_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep == NULL)
+ return -ENOMEM;
+
+ if (value == PRCMU_QOS_DEFAULT_VALUE)
+ dep->value = prcmu_qos_array[prcmu_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_add(&dep->list,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ update_target(prcmu_qos_class);
+
+ return 0;
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_add_requirement);
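+
+/*
+ * Example (hypothetical driver code, "mydrv" is a made-up requester name):
+ *
+ *	prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mydrv", 100);
+ *	...
+ *	prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "mydrv",
+ *				     PRCMU_QOS_DEFAULT_VALUE);
+ *	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mydrv");
+ */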
+
+/**
+ * prcmu_qos_update_requirement - modifies an existing qos request
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * Updates an existing qos requirement for the prcmu_qos_class of parameters
+ * along with updating the target prcmu_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int prcmu_qos_update_requirement(int prcmu_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_for_each_entry(node,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PRCMU_QOS_DEFAULT_VALUE)
+ node->value =
+ prcmu_qos_array[prcmu_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ if (pending_update)
+ update_target(prcmu_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_update_requirement);
+
+/**
+ * prcmu_qos_remove_requirement - removes an existing qos request
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ *
+ * Will remove named qos request from prcmu_qos_class list of parameters and
+ * recompute the current target value for the prcmu_qos_class.
+ */
+void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_for_each_entry(node,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ if (pending_update)
+ update_target(prcmu_qos_class);
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_remove_requirement);
+
+/**
+ * prcmu_qos_add_notifier - sets notification entry for changes to target value
+ * @prcmu_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the prcmu_qos_class target value.
+ */
+int prcmu_qos_add_notifier(int prcmu_qos_class, struct notifier_block *notifier)
+{
+ int retval = -EINVAL;
+
+ if (prcmu_qos_array[prcmu_qos_class]->notifiers)
+ retval = blocking_notifier_chain_register(
+ prcmu_qos_array[prcmu_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_add_notifier);
+
+/**
+ * prcmu_qos_remove_notifier - deletes notification entry from chain.
+ * @prcmu_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the prcmu_qos_class target value.
+ */
+int prcmu_qos_remove_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier)
+{
+ int retval = -EINVAL;
+ if (prcmu_qos_array[prcmu_qos_class]->notifiers)
+ retval = blocking_notifier_chain_unregister(
+ prcmu_qos_array[prcmu_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_remove_notifier);
+
+#define USER_QOS_NAME_LEN 32
+
+static int prcmu_qos_power_open(struct inode *inode, struct file *filp,
+ long prcmu_qos_class)
+{
+ int ret;
+ char name[USER_QOS_NAME_LEN];
+
+ filp->private_data = (void *)prcmu_qos_class;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ ret = prcmu_qos_add_requirement(prcmu_qos_class, name,
+ PRCMU_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+
+ return -EPERM;
+}
+
+
+static int prcmu_qos_ape_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_APE_OPP);
+}
+
+static int prcmu_qos_ddr_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_DDR_OPP);
+}
+
+static int prcmu_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int prcmu_qos_class;
+ char name[USER_QOS_NAME_LEN];
+
+ prcmu_qos_class = (long)filp->private_data;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ prcmu_qos_remove_requirement(prcmu_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t prcmu_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int prcmu_qos_class;
+ char name[USER_QOS_NAME_LEN];
+
+ prcmu_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ prcmu_qos_update_requirement(prcmu_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+/* Functions to provide QoS to user space */
+static const struct file_operations prcmu_qos_ape_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_ape_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+/* Functions to provide QoS to user space */
+static const struct file_operations prcmu_qos_ddr_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_ddr_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+static int register_prcmu_qos_misc(struct prcmu_qos_object *qos,
+ const struct file_operations *fops)
+{
+ qos->prcmu_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->prcmu_qos_power_miscdev.name = qos->name;
+ qos->prcmu_qos_power_miscdev.fops = fops;
+
+ return misc_register(&qos->prcmu_qos_power_miscdev);
+}
+
+static void qos_delayed_work_up_fn(struct work_struct *work)
+{
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq", 100);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq", 100);
+ cpufreq_requirement_set = 100;
+}
+
+static void qos_delayed_work_down_fn(struct work_struct *work)
+{
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+}
+
+static DECLARE_DELAYED_WORK(qos_delayed_work_up, qos_delayed_work_up_fn);
+static DECLARE_DELAYED_WORK(qos_delayed_work_down, qos_delayed_work_down_fn);
+
+static int qos_delayed_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ s32 new_ddr_target;
+
+ /* Only react once per transition and only for one core, e.g. core 0 */
+ if (event != CPUFREQ_POSTCHANGE || freq->cpu != 0)
+ return 0;
+
+ /*
+ * APE and DDR OPP are always handled together in this solution.
+ * Hence no need to check both DDR and APE opp in the code below.
+ */
+
+ /* Which DDR OPP are we aiming for? */
+ if (freq->new > ARM_THRESHOLD_FREQ)
+ new_ddr_target = 100;
+ else
+ new_ddr_target = PRCMU_QOS_DEFAULT_VALUE;
+
+ if (new_ddr_target == cpufreq_requirement_queued) {
+ /*
+ * We're already at, or going to, the target requirement.
+ * This is only a fluctuation within the interval
+ * corresponding to the same DDR requirement.
+ */
+ return 0;
+ }
+ cpufreq_requirement_queued = new_ddr_target;
+
+ if (freq->new > ARM_THRESHOLD_FREQ) {
+ cancel_delayed_work_sync(&qos_delayed_work_down);
+ /*
+ * Only schedule this requirement if it is not the current
+ * one.
+ */
+ if (new_ddr_target != cpufreq_requirement_set)
+ schedule_delayed_work(&qos_delayed_work_up,
+ cpufreq_opp_delay);
+ } else {
+ cancel_delayed_work_sync(&qos_delayed_work_up);
+ /*
+ * Only schedule this requirement if it is not the current
+ * one.
+ */
+ if (new_ddr_target != cpufreq_requirement_set)
+ schedule_delayed_work(&qos_delayed_work_down,
+ cpufreq_opp_delay);
+ }
+
+ return 0;
+}
+
+static int __init prcmu_qos_power_init(void)
+{
+ int ret;
+
+ /* 25% DDR OPP is not supported on u5500 */
+ if (cpu_is_u5500())
+ ddr_opp_qos.default_value = 50;
+
+ ret = register_prcmu_qos_misc(&ape_opp_qos, &prcmu_qos_ape_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu ape qos: setup failed\n");
+ return ret;
+ }
+
+ ret = register_prcmu_qos_misc(&ddr_opp_qos, &prcmu_qos_ddr_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu ddr qos: setup failed\n");
+ return ret;
+ }
+
+ prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+ cpufreq_requirement_queued = PRCMU_QOS_DEFAULT_VALUE;
+
+ cpufreq_register_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return ret;
+}
+
+late_initcall(prcmu_qos_power_init);
diff --git a/arch/arm/mach-ux500/pm/runtime.c b/arch/arm/mach-ux500/pm/runtime.c
new file mode 100644
index 00000000000..f896f6803f3
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/runtime.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ *
+ * Based on:
+ * Runtime PM support code for SuperH Mobile ARM
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <plat/pincfg.h>
+
+#include "../pins.h"
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_ENABLED 2
+
+struct pm_runtime_data {
+ unsigned long flags;
+ struct ux500_regulator *regulator;
+ struct ux500_pins *pins;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+ struct pm_runtime_data *prd = res;
+
+ dev_dbg(dev, "__devres_release()\n");
+
+ if (test_bit(BIT_ENABLED, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+ }
+
+ if (test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_put(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_put(prd->regulator);
+ }
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+ return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ prd->pins = ux500_pins_get(dev_name(dev));
+
+ prd->regulator = ux500_regulator_get(dev);
+ if (IS_ERR(prd->regulator))
+ prd->regulator = NULL;
+
+ if (prd->pins || prd->regulator) {
+ dev_info(dev, "managed by runtime pm: %s%s\n",
+ prd->pins ? "pins " : "",
+ prd->regulator ? "regulator " : "");
+
+ set_bit(BIT_ACTIVE, &prd->flags);
+ }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+ dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+static void platform_pm_runtime_used(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd)
+ set_bit(BIT_ONCE, &prd->flags);
+}
+
+static int ux500_pd_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static int ux500_pd_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_bug(dev, prd);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+
+ clear_bit(BIT_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+static int ux500_pd_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_used(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_enable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_enable(prd->regulator);
+
+ set_bit(BIT_ENABLED, &prd->flags);
+
+ }
+ return pm_generic_runtime_resume(dev);
+}
+
+static int ux500_pd_suspend_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+ /* Already is runtime suspended? Nothing to do. */
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ /*
+ * We get here only if the device was not runtime suspended for some
+ * reason. We still need to do the power save stuff when going into
+ * suspend, so force it here.
+ */
+ return ux500_pd_runtime_suspend(dev);
+}
+
+static int ux500_pd_resume_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+ /*
+ * Already was runtime suspended? No need to resume here, runtime
+ * resume will take care of it.
+ */
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ /*
+ * We get here only if the device was not runtime suspended,
+ * but we forced it down in suspend_noirq above. Bring it
+ * up since pm-runtime thinks it is not suspended.
+ */
+ return ux500_pd_runtime_resume(dev);
+}
+
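+/*
+ * Bus notifier: when a driver is about to bind, allocate the per-device
+ * runtime PM data (pin and regulator handles) as a devres resource so that
+ * it is released automatically when the device is unbound.
+ */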
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pm_runtime_data *prd;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+ if (prd) {
+ devres_add(dev, prd);
+ platform_pm_runtime_init(dev, prd);
+ } else
+ dev_err(dev, "unable to alloc memory for runtime pm\n");
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+#define ux500_pd_suspend_noirq NULL
+#define ux500_pd_resume_noirq NULL
+#define ux500_pd_runtime_idle NULL
+#define ux500_pd_runtime_suspend NULL
+#define ux500_pd_runtime_resume NULL
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ux500_regulator *regulator = NULL;
+ struct ux500_pins *pins = NULL;
+ struct device *dev = data;
+ const char *onoff = NULL;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_enable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_enable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "on";
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_disable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_disable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "off";
+ break;
+ }
+
+ if (pins || regulator) {
+ dev_info(dev, "runtime pm disabled, forced %s: %s%s\n",
+ onoff,
+ pins ? "pins " : "",
+ regulator ? "regulator " : "");
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+struct dev_pm_domain ux500_amba_dev_power_domain = {
+ .ops = {
+ /* USE_AMBA_PM_SLEEP_OPS minus the two we replace */
+ .prepare = amba_pm_prepare,
+ .complete = amba_pm_complete,
+ .suspend = amba_pm_suspend,
+ .resume = amba_pm_resume,
+ .freeze = amba_pm_freeze,
+ .thaw = amba_pm_thaw,
+ .poweroff = amba_pm_poweroff,
+ .restore = amba_pm_restore,
+ .freeze_noirq = amba_pm_freeze_noirq,
+ .thaw_noirq = amba_pm_thaw_noirq,
+ .poweroff_noirq = amba_pm_poweroff_noirq,
+ .restore_noirq = amba_pm_restore_noirq,
+
+ .suspend_noirq = ux500_pd_suspend_noirq,
+ .resume_noirq = ux500_pd_resume_noirq,
+ .runtime_idle = ux500_pd_runtime_idle,
+ .runtime_suspend = ux500_pd_runtime_suspend,
+ .runtime_resume = ux500_pd_runtime_resume,
+ },
+};
+
+struct dev_pm_domain ux500_dev_power_domain = {
+ .ops = {
+ /* USE_PLATFORM_PM_SLEEP_OPS minus the two we replace */
+ .prepare = platform_pm_prepare,
+ .complete = platform_pm_complete,
+ .suspend = platform_pm_suspend,
+ .resume = platform_pm_resume,
+ .freeze = platform_pm_freeze,
+ .thaw = platform_pm_thaw,
+ .poweroff = platform_pm_poweroff,
+ .restore = platform_pm_restore,
+ .freeze_noirq = platform_pm_freeze_noirq,
+ .thaw_noirq = platform_pm_thaw_noirq,
+ .poweroff_noirq = platform_pm_poweroff_noirq,
+ .restore_noirq = platform_pm_restore_noirq,
+
+ .suspend_noirq = ux500_pd_suspend_noirq,
+ .resume_noirq = ux500_pd_resume_noirq,
+ .runtime_idle = ux500_pd_runtime_idle,
+ .runtime_suspend = ux500_pd_runtime_suspend,
+ .runtime_resume = ux500_pd_runtime_resume,
+ },
+};
+
+static struct notifier_block ux500_pd_platform_notifier = {
+ .notifier_call = ux500_pd_bus_notify,
+};
+
+static struct notifier_block ux500_pd_amba_notifier = {
+ .notifier_call = ux500_pd_bus_notify,
+};
+
+static int __init ux500_pm_runtime_platform_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &ux500_pd_platform_notifier);
+ return 0;
+}
+core_initcall(ux500_pm_runtime_platform_init);
+
+/*
+ * The amba bus itself gets registered in a core_initcall, so we can't use
+ * that.
+ */
+static int __init ux500_pm_runtime_amba_init(void)
+{
+ bus_register_notifier(&amba_bustype, &ux500_pd_amba_notifier);
+ return 0;
+}
+arch_initcall(ux500_pm_runtime_amba_init);
diff --git a/arch/arm/mach-ux500/pm/scu.h b/arch/arm/mach-ux500/pm/scu.h
new file mode 100644
index 00000000000..a09e86a9d3c
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/scu.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASMARM_ARCH_SCU_H
+#define __ASMARM_ARCH_SCU_H
+
+#include <mach/hardware.h>
+
+#define SCU_BASE U8500_SCU_BASE
+/*
+ * SCU registers
+ */
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_CPU_STATUS 0x08
+#define SCU_INVALIDATE 0x0c
+#define SCU_FPGA_REVISION 0x10
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/suspend.c b/arch/arm/mach-ux500/pm/suspend.c
new file mode 100644
index 00000000000..d64faa99a3a
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ */
+
+#include <linux/suspend.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/regulator/ab8500-debug.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <mach/context.h>
+#include <mach/pm.h>
+
+#include "suspend_dbg.h"
+
+static void (*pins_suspend_force)(void);
+static void (*pins_suspend_force_mux)(void);
+
+void suspend_set_pins_force_fn(void (*force)(void), void (*force_mux)(void))
+{
+ pins_suspend_force = force;
+ pins_suspend_force_mux = force_mux;
+}
+
+static atomic_t block_sleep = ATOMIC_INIT(0);
+
+void suspend_block_sleep(void)
+{
+ atomic_inc(&block_sleep);
+}
+
+void suspend_unblock_sleep(void)
+{
+ atomic_dec(&block_sleep);
+}
+
+static bool sleep_is_blocked(void)
+{
+ return (atomic_read(&block_sleep) != 0);
+}
+
+static int suspend(bool do_deepsleep)
+{
+ bool pins_force = pins_suspend_force_mux && pins_suspend_force;
+ int ret = 0;
+
+ if (sleep_is_blocked()) {
+ pr_info("suspend/resume: interrupted by modem.\n");
+ return -EBUSY;
+ }
+
+ nmk_gpio_clocks_enable();
+
+ ux500_suspend_dbg_add_wake_on_uart();
+
+ nmk_gpio_wakeups_suspend();
+
+ /* configure the prcm for a sleep wakeup */
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ABB));
+
+ context_vape_save();
+
+ if (pins_force) {
+ /*
+ * Save GPIO settings before applying power save
+ * settings
+ */
+ context_gpio_save();
+
+ /* Apply GPIO power save mux settings */
+ context_gpio_mux_safe_switch(true);
+ pins_suspend_force_mux();
+ context_gpio_mux_safe_switch(false);
+
+ /* Apply GPIO power save settings */
+ pins_suspend_force();
+ }
+
+ ux500_pm_gic_decouple();
+
+ if (ux500_pm_gic_pending_interrupt()) {
+ pr_info("suspend/resume: pending interrupt\n");
+
+ /* Recouple GIC with the interrupt bus */
+ ux500_pm_gic_recouple();
+ ret = -EBUSY;
+
+ goto exit;
+ }
+ ux500_pm_prcmu_set_ioforce(true);
+
+ if (do_deepsleep) {
+ context_varm_save_common();
+ context_varm_save_core();
+ context_gic_dist_disable_unneeded_irqs();
+ context_save_cpu_registers();
+
+ /*
+		 * Since we have only 100 us between requesting a power state
+		 * and wfi, we clean the cache here as well, so that the final
+		 * cache clean before wfi has as little as possible left
+		 * to do.
+ */
+ context_clean_l1_cache_all();
+
+ (void) prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
+ false, false);
+ context_save_to_sram_and_wfi(true);
+
+ context_restore_cpu_registers();
+ context_varm_restore_core();
+ context_varm_restore_common();
+
+ } else {
+
+ context_clean_l1_cache_all();
+ (void) prcmu_set_power_state(APEXECUTE_TO_APSLEEP,
+ false, false);
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+ }
+
+ context_vape_restore();
+
+ /* If GPIO woke us up then save the pins that caused the wake up */
+ ux500_pm_gpio_save_wake_up_status();
+
+ ux500_suspend_dbg_sleep_status(do_deepsleep);
+
+ /* APE was turned off, restore IO ring */
+ ux500_pm_prcmu_set_ioforce(false);
+
+exit:
+ if (pins_force) {
+ /* Restore gpio settings */
+ context_gpio_mux_safe_switch(true);
+ context_gpio_restore_mux();
+ context_gpio_mux_safe_switch(false);
+ context_gpio_restore();
+ }
+
+ /* This is what cpuidle wants */
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+ PRCMU_WAKEUP(ABB));
+
+ nmk_gpio_wakeups_resume();
+
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ nmk_gpio_clocks_disable();
+
+ return ret;
+}
+
+static int ux500_suspend_enter(suspend_state_t state)
+{
+ if (ux500_suspend_enabled()) {
+ if (ux500_suspend_deepsleep_enabled() &&
+ state == PM_SUSPEND_MEM)
+ return suspend(true);
+ if (ux500_suspend_sleep_enabled())
+ return suspend(false);
+ }
+
+ ux500_suspend_dbg_add_wake_on_uart();
+ /*
+ * Set IOFORCE in order to wake on GPIO the same way
+ * as in deeper sleep.
+ * (U5500 is not ready for IOFORCE)
+ */
+ if (!cpu_is_u5500())
+ ux500_pm_prcmu_set_ioforce(true);
+
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+
+ if (!cpu_is_u5500())
+ ux500_pm_prcmu_set_ioforce(false);
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ return 0;
+}
+
+static int ux500_suspend_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
+}
+
+static int ux500_suspend_prepare_late(void)
+{
+ /* ESRAM to retention instead of OFF until ROM is fixed */
+ (void) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+ ab8500_regulator_debug_force();
+ ux500_regulator_suspend_debug();
+ return 0;
+}
+
+static void ux500_suspend_wake(void)
+{
+ ux500_regulator_resume_debug();
+ ab8500_regulator_debug_restore();
+ (void) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+}
+
+static int ux500_suspend_begin(suspend_state_t state)
+{
+ (void) prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "suspend", 100);
+ return ux500_suspend_dbg_begin(state);
+}
+
+static void ux500_suspend_end(void)
+{
+ (void) prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "suspend", 25);
+}
+
+static struct platform_suspend_ops ux500_suspend_ops = {
+ .enter = ux500_suspend_enter,
+ .valid = ux500_suspend_valid,
+ .prepare_late = ux500_suspend_prepare_late,
+ .wake = ux500_suspend_wake,
+ .begin = ux500_suspend_begin,
+ .end = ux500_suspend_end,
+};
+
+static __init int ux500_suspend_init(void)
+{
+ ux500_suspend_dbg_init();
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "suspend", 25);
+ suspend_set_ops(&ux500_suspend_ops);
+ return 0;
+}
+device_initcall(ux500_suspend_init);
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.c b/arch/arm/mach-ux500/pm/suspend_dbg.c
new file mode 100644
index 00000000000..cd058bad91e
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <mach/pm.h>
+
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+static u32 sleep_enabled = 1;
+#else
+static u32 sleep_enabled;
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_MEM
+static u32 deepsleep_enabled = 1;
+#else
+static u32 deepsleep_enabled;
+#endif
+
+static u32 suspend_enabled = 1;
+
+static u32 deepsleeps_done;
+static u32 deepsleeps_failed;
+static u32 sleeps_done;
+static u32 sleeps_failed;
+static u32 suspend_count;
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void)
+{
+ irq_set_irq_wake(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN), 1);
+ irq_set_irq_type(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN),
+ IRQ_TYPE_EDGE_BOTH);
+}
+
+void ux500_suspend_dbg_remove_wake_on_uart(void)
+{
+ irq_set_irq_wake(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN), 0);
+}
+#endif
+
+bool ux500_suspend_enabled(void)
+{
+ return suspend_enabled != 0;
+}
+
+bool ux500_suspend_sleep_enabled(void)
+{
+ return sleep_enabled != 0;
+}
+
+bool ux500_suspend_deepsleep_enabled(void)
+{
+ return deepsleep_enabled != 0;
+}
+
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep)
+{
+ enum prcmu_idle_stat prcmu_status;
+
+ prcmu_status = ux500_pm_prcmu_idle_stat();
+
+ if (is_deepsleep) {
+ pr_info("Returning from ApDeepSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == DEEP_SLEEP_OK ? "Success" : "Fail!");
+ if (prcmu_status == DEEP_SLEEP_OK)
+ deepsleeps_done++;
+ else
+ deepsleeps_failed++;
+ } else {
+ pr_info("Returning from ApSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == SLEEP_OK ? "Success" : "Fail!");
+ if (prcmu_status == SLEEP_OK)
+ sleeps_done++;
+ else
+ sleeps_failed++;
+ }
+}
+
+int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ suspend_count++;
+ return 0;
+}
+
+void ux500_suspend_dbg_init(void)
+{
+ struct dentry *suspend_dir;
+ struct dentry *file;
+
+ suspend_dir = debugfs_create_dir("suspend", NULL);
+ if (IS_ERR_OR_NULL(suspend_dir))
+ return;
+
+ file = debugfs_create_bool("sleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &sleep_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_bool("deepsleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &deepsleep_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_bool("enable", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &suspend_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("count", S_IRUGO,
+ suspend_dir,
+ &suspend_count);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("sleep_count", S_IRUGO,
+ suspend_dir,
+ &sleeps_done);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("deepsleep_count", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_done);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+
+ file = debugfs_create_u32("sleep_failed", S_IRUGO,
+ suspend_dir,
+ &sleeps_failed);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("deepsleep_failed", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_failed);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ return;
+error:
+ if (!IS_ERR_OR_NULL(suspend_dir))
+ debugfs_remove_recursive(suspend_dir);
+}
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.h b/arch/arm/mach-ux500/pm/suspend_dbg.h
new file mode 100644
index 00000000000..29bfec7e269
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#ifndef UX500_SUSPEND_DBG_H
+#define UX500_SUSPEND_DBG_H
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void);
+void ux500_suspend_dbg_remove_wake_on_uart(void);
+#else
+static inline void ux500_suspend_dbg_add_wake_on_uart(void) { }
+static inline void ux500_suspend_dbg_remove_wake_on_uart(void) { }
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_DBG
+bool ux500_suspend_enabled(void);
+bool ux500_suspend_sleep_enabled(void);
+bool ux500_suspend_deepsleep_enabled(void);
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep);
+void ux500_suspend_dbg_init(void);
+int ux500_suspend_dbg_begin(suspend_state_t state);
+
+#else
+static inline bool ux500_suspend_enabled(void)
+{
+ return true;
+}
+static inline bool ux500_suspend_sleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+ return true;
+#else
+ return false;
+#endif
+}
+static inline bool ux500_suspend_deepsleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_MEM
+ return true;
+#else
+ return false;
+#endif
+}
+static inline void ux500_suspend_dbg_sleep_status(bool is_deepsleep) { }
+static inline void ux500_suspend_dbg_init(void) { }
+
+static inline int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/timer.c b/arch/arm/mach-ux500/pm/timer.c
new file mode 100644
index 00000000000..79652a6af23
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/timer.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * The RTC timer block is a ST Microelectronics variant of ARM PL031.
+ * Clockwatch part is the same as PL031, while the timer part is only
+ * present on the ST Microelectronics variant.
+ * Here only the timer part is used.
+ *
+ * The timer part is quite troublesome to program correctly: lots of
+ * long delays are needed in order to ensure that what you write actually
+ * takes effect.
+ *
+ * In other words, this timer is, and should only be, used from cpuidle
+ * during special conditions where the surroundings are known, so that
+ * the number of delays can be reduced.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+
+#include <asm/errno.h>
+
+#include <mach/hardware.h>
+
+#define RTC_IMSC 0x10
+#define RTC_MIS 0x18
+#define RTC_ICR 0x1C
+#define RTC_TDR 0x20
+#define RTC_TLR1 0x24
+#define RTC_TCR 0x28
+
+#define RTC_TLR2 0x2C
+#define RTC_TPR1 0x3C
+
+#define RTC_TCR_RTTOS (1 << 0)
+#define RTC_TCR_RTTEN (1 << 1)
+#define RTC_TCR_RTTSS (1 << 2)
+
+#define RTC_IMSC_TIMSC (1 << 1)
+#define RTC_ICR_TIC (1 << 1)
+#define RTC_MIS_RTCTMIS (1 << 1)
+
+#define RTC_TCR_RTTPS_2 (1 << 4)
+#define RTC_TCR_RTTPS_3 (2 << 4)
+#define RTC_TCR_RTTPS_4 (3 << 4)
+#define RTC_TCR_RTTPS_5 (4 << 4)
+#define RTC_TCR_RTTPS_6 (5 << 4)
+#define RTC_TCR_RTTPS_7 (6 << 4)
+#define RTC_TCR_RTTPS_8 (7 << 4)
+
+#define WRITE_DELAY 130 /* 4 cycles plus margin */
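+/*
+ * Rough arithmetic behind the value above (assuming the RTT runs from the
+ * usual 32.768 kHz RTC clock, i.e. roughly 30.5 us per cycle):
+ * 4 cycles * ~30.5 us ~= 122 us, rounded up to 130 us of udelay() margin.
+ */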
+
+/*
+ * Countdown measurement value. It just has to be high enough to
+ * differ from scheduled values.
+ */
+#define MEASURE_VAL 0xffffffff
+
+/* Just a value bigger than any reasonable scheduled timeout. */
+#define MEASURE_VAL_LIMIT 0xf0000000
+
+
+#define TICKS_TO_NS(x) ((s64)x * 30512)
+#define US_TO_TICKS(x) ((u32)((1000 * x) / 30512))
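+/*
+ * Worked example of the conversions above (30512 ns being the driver's
+ * integer approximation of one 32 kHz RTC tick):
+ *   US_TO_TICKS(1000) -> (1000 * 1000) / 30512 ~= 32 ticks for 1 ms
+ *   TICKS_TO_NS(32)   -> 32 * 30512            = 976384 ns ~= 0.98 ms
+ * The small error comes from integer truncation and the 30512 ns
+ * approximation of 1/32768 s.
+ */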
+
+static void __iomem *rtc_base;
+static bool measure_latency;
+
+#ifdef CONFIG_U8500_CPUIDLE_DEBUG
+
+/*
+ * The plan here is to be able to measure the ApSleep/ApDeepSleep exit
+ * latency by using a known timer pattern.
+ * The first entry in the pattern, LR1, is the time that the scheduler
+ * wants us to sleep. The second entry is a high value, too large to
+ * ever be scheduled, so that a running scheduled value can be told
+ * apart from a measurement value.
+ * When an RTT interrupt has occurred, the block automatically starts
+ * counting down the measurement value in LR2. Once the ARM is awake,
+ * it reads how far the RTT has decreased the value loaded from LR2 and
+ * from that calculates how long the wakeup took.
+ */
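+/*
+ * Worked example (numbers are purely illustrative): if TDR reads
+ * 0xffffff00 while counting on LR2, then
+ *   ticks = MEASURE_VAL - 0xffffff00 = 0xff = 255,
+ * which is well below MEASURE_VAL_LIMIT, so the wakeup latency is
+ * reported as TICKS_TO_NS(255) ~= 255 * 30512 ns ~= 7.8 ms.
+ */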
+ktime_t u8500_rtc_exit_latency_get(void)
+{
+ u32 ticks;
+
+ if (measure_latency) {
+ ticks = MEASURE_VAL - readl(rtc_base + RTC_TDR);
+
+ /*
+ * Check if we are actually counting on a LR2 value.
+ * If not we have woken on another interrupt.
+ */
+ if (ticks < MEASURE_VAL_LIMIT) {
+ /* convert 32 kHz ticks to ns */
+ return ktime_set(0, TICKS_TO_NS(ticks));
+ }
+ }
+ return ktime_set(0, 0);
+}
+
+static void measure_latency_start(void)
+{
+ udelay(WRITE_DELAY);
+ /*
+	 * Disable RTT and clear self-start, since we want to restart
+	 * rather than continue from the current pattern. (See below.)
+ */
+ writel(0, rtc_base + RTC_TCR);
+ udelay(WRITE_DELAY);
+
+ /*
+ * Program LR2 (load register two) to maximum value to ease
+ * identification of timer interrupt vs other.
+ */
+ writel(MEASURE_VAL, rtc_base + RTC_TLR2);
+ /*
+	 * Set the Load Register execution pattern: a cleared bit
+	 * means pick LR1, a set bit means LR2.
+	 * 0xfe (binary 11111110) means do LR1 first, then do
+	 * LR2 seven times.
+ */
+ writel(0xfe, rtc_base + RTC_TPR1);
+
+ udelay(WRITE_DELAY);
+
+ /*
+ * Enable self-start, plus a pattern of eight.
+ */
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTPS_8,
+ rtc_base + RTC_TCR);
+ udelay(WRITE_DELAY);
+}
+
+void ux500_rtcrtt_measure_latency(bool enable)
+{
+ if (enable) {
+ measure_latency_start();
+ } else {
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+ writel(RTC_IMSC_TIMSC, rtc_base + RTC_IMSC);
+ }
+ measure_latency = enable;
+}
+#else
+static inline void measure_latency_start(void) { }
+static inline void ux500_rtcrtt_measure_latency(bool enable) { }
+#endif
+
+void ux500_rtcrtt_off(void)
+{
+ if (measure_latency) {
+ measure_latency_start();
+ } else {
+		/* Clear any pending interrupt */
+ if (readl(rtc_base + RTC_MIS) & RTC_MIS_RTCTMIS)
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+
+		/* Disable the timer; keep self-start and one-shot mode */
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ }
+}
+
+void ux500_rtcrtt_next(u32 time_us)
+{
+ writel(US_TO_TICKS(time_us), rtc_base + RTC_TLR1);
+}
+
+static int __init ux500_rtcrtt_init(void)
+{
+ if (cpu_is_u8500()) {
+ rtc_base = __io_address(U8500_RTC_BASE);
+ } else if (cpu_is_u5500()) {
+ rtc_base = __io_address(U5500_RTC_BASE);
+ } else {
+ pr_err("timer-rtt: Unknown DB Asic!\n");
+ return -EINVAL;
+ }
+ ux500_rtcrtt_measure_latency(false);
+ return 0;
+}
+subsys_initcall(ux500_rtcrtt_init);
diff --git a/arch/arm/mach-ux500/pm/usecase_gov.c b/arch/arm/mach-ux500/pm/usecase_gov.c
new file mode 100644
index 00000000000..8ca064da0a1
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/usecase_gov.c
@@ -0,0 +1,943 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@stericsson.com> for ST-Ericsson
+ * Author: Vincent Guittot <vincent.guittot@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/io.h>
+#include <linux/earlysuspend.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/kernel_stat.h>
+#include <linux/ktime.h>
+#include <linux/cpufreq.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "../../../../drivers/cpuidle/cpuidle-dbx500.h"
+
+
+#define CPULOAD_MEAS_DELAY	3000 /* 3 seconds of delta */
+
+/* debug */
+static unsigned long debug;
+
+#define hp_printk \
+	if (debug) \
+		printk
+
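+/*
+ * Note on usage: hp_printk("fmt", args) expands to
+ *   if (debug) printk("fmt", args);
+ * There is no surrounding do { } while (0) and no else branch, so avoid
+ * using it as the body of an un-braced if/else statement.
+ */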
+enum ux500_uc {
+ UX500_UC_NORMAL = 0,
+ UX500_UC_AUTO, /* Add use case below this. */
+ UX500_UC_VC,
+ UX500_UC_LPA,
+ UX500_UC_USER, /* Add use case above this. */
+ UX500_UC_MAX,
+};
+
+/* cpu load monitor struct */
+#define LOAD_MONITOR 4
+struct hotplug_cpu_info {
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_io;
+ unsigned int load[LOAD_MONITOR];
+ unsigned int io[LOAD_MONITOR];
+ unsigned int idx;
+};
+
+static DEFINE_PER_CPU(struct hotplug_cpu_info, hotplug_info);
+
+/* Auto trigger criteria */
+/* loadavg threshold */
+static unsigned long lower_threshold = 175;
+static unsigned long upper_threshold = 450;
+/* load balancing */
+static unsigned long max_unbalance = 210;
+/* trend load */
+static unsigned long trend_unbalance = 40;
+static unsigned long min_trend = 5;
+/* instant load */
+static unsigned long max_instant = 85;
+
+/* Number of interrupts per second before exiting auto mode */
+static u32 exit_irq_per_s = 1000;
+static u64 old_num_irqs;
+
+static DEFINE_MUTEX(usecase_mutex);
+static bool user_config_updated;
+static enum ux500_uc current_uc = UX500_UC_MAX;
+static bool is_work_scheduled;
+static bool is_early_suspend;
+static bool uc_master_enable = true;
+
+static unsigned int cpuidle_deepest_state;
+
+struct usecase_config {
+ char *name;
+ unsigned long max_freq;
+ unsigned long min_freq; /* if no requirement set 0 */
+ unsigned long cpuidle_multiplier;
+ bool second_cpu_online;
+ bool l2_prefetch_en;
+ bool enable;
+ unsigned int forced_state; /* Forced cpu idle state. */
+ bool vc_override; /* QOS override for voice-call. */
+};
+
+static struct usecase_config usecase_conf[UX500_UC_MAX] = {
+ [UX500_UC_NORMAL] = {
+ .name = "normal",
+ .max_freq = 1000000,
+ .min_freq = 200000,
+ .cpuidle_multiplier = 1024,
+ .second_cpu_online = true,
+ .l2_prefetch_en = true,
+ .enable = true,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_AUTO] = {
+ .name = "auto",
+ .max_freq = 400000,
+ .min_freq = 200000,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = true,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_VC] = {
+ .name = "voice-call",
+ .max_freq = 400000,
+ .min_freq = 400000,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = true,
+ },
+ [UX500_UC_LPA] = {
+ .name = "low-power-audio",
+ .max_freq = 400000,
+ .min_freq = 400000,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0, /* Updated dynamically */
+ .vc_override = false,
+ },
+};
+
+/* daemon */
+static struct delayed_work work_usecase;
+static struct early_suspend usecase_early_suspend;
+
+/* calculate loadavg */
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
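+/*
+ * Worked example (FIXED_1 is 1 << FSHIFT, i.e. 2048 with the usual
+ * FSHIFT of 11): an avenrun value of 3072 represents a load of 1.50:
+ *   LOAD_INT(3072)  = 3072 >> 11                  = 1
+ *   LOAD_FRAC(3072) = ((3072 & 2047) * 100) >> 11 = 50
+ * so determine_loadavg() below would add 1 * 100 + 50 = 150 for it.
+ */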
+
+extern int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
+extern int cpuidle_set_multiplier(unsigned int value);
+extern int cpuidle_force_state(unsigned int state);
+
+static unsigned long determine_loadavg(void)
+{
+ unsigned long avg = 0;
+ unsigned long avnrun[3];
+
+ get_avenrun(avnrun, FIXED_1 / 200, 0);
+ avg += (LOAD_INT(avnrun[0]) * 100) + (LOAD_FRAC(avnrun[0]) % 100);
+
+ return avg;
+}
+
+static unsigned long determine_cpu_load(void)
+{
+ int i;
+ unsigned long total_load = 0;
+
+ /* get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load, iowait;
+ unsigned int idle_time, iowait_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ /* update both cur_idle_time and cur_wall_time */
+ cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time_us(i, &cur_wall_time);
+
+ /* how much wall time has passed since last iteration? */
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ info->prev_cpu_wall);
+ info->prev_cpu_wall = cur_wall_time;
+
+ /* how much idle time has passed since last iteration? */
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ info->prev_cpu_idle);
+ info->prev_cpu_idle = cur_idle_time;
+
+ /* how much io wait time has passed since last iteration? */
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ info->prev_cpu_io);
+ info->prev_cpu_io = cur_iowait_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /* load is the percentage of time not spent in idle */
+ load = 100 * (wall_time - idle_time) / wall_time;
+ info->load[info->idx] = load;
+ hp_printk("cpu %d load %u ", i, load);
+
+		/* iowait is the percentage of time spent in io wait */
+		iowait = 100 * (iowait_time) / wall_time;
+		info->io[info->idx++] = iowait;
+ hp_printk("iowait %u\n", iowait);
+
+ if (info->idx >= LOAD_MONITOR)
+ info->idx = 0;
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
+static unsigned long determine_cpu_load_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j];
+
+ load /= LOAD_MONITOR;
+
+ hp_printk("cpu %d load trend %u\n", i, load);
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
+static unsigned long determine_cpu_balance_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+ unsigned long min_load = (unsigned long) (-1);
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j];
+
+ load /= LOAD_MONITOR;
+
+ if (min_load > load)
+ min_load = load;
+ total_load += load;
+ }
+
+ if (min_load > min_trend)
+ total_load = (100 * total_load) / min_load;
+ else
+ total_load = 50 << num_online_cpus();
+
+ return total_load;
+}
+
+static void init_cpu_load_trend(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct hotplug_cpu_info *info;
+ int j;
+
+ info = &per_cpu(hotplug_info, i);
+
+ info->prev_cpu_idle = get_cpu_idle_time_us(i,
+ &(info->prev_cpu_wall));
+ info->prev_cpu_io = get_cpu_iowait_time_us(i,
+ &(info->prev_cpu_wall));
+
+ for (j = 0; j < LOAD_MONITOR; j++) {
+ info->load[j] = 100;
+ info->io[j] = 100;
+ }
+ info->idx = 0;
+ }
+}
+
+static u32 get_num_interrupts_per_s(void)
+{
+ int cpu;
+ int i;
+ u64 num_irqs = 0;
+ ktime_t now;
+ static ktime_t last;
+ unsigned int delta;
+ u32 irqs = 0;
+
+ now = ktime_get();
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < NR_IRQS; i++)
+ num_irqs += kstat_irqs_cpu(i, cpu);
+ }
+ pr_debug("%s: total num irqs: %lld, previous %lld\n",
+ __func__, num_irqs, old_num_irqs);
+
+ if (old_num_irqs > 0) {
+ delta = (u32)ktime_to_ms(ktime_sub(now, last)) / 1000;
+ irqs = ((u32)(num_irqs - old_num_irqs)) / delta;
+ }
+
+ old_num_irqs = num_irqs;
+ last = now;
+
+ pr_debug("delta irqs per sec:%d\n", irqs);
+
+ return irqs;
+}
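+/*
+ * Illustrative example of the rate computed above: if roughly 3 s have
+ * passed since the previous call and 4500 new interrupts were counted,
+ * irqs = 4500 / 3 = 1500, which exceeds the default exit_irq_per_s of
+ * 1000 and is treated by delayed_usecase_work() below as an "instant"
+ * load increase, falling back to the normal use case.
+ */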
+
+static void set_cpu_config(enum ux500_uc new_uc)
+{
+ struct cpufreq_policy policy;
+ int err;
+ bool update = false;
+ u32 min_freq, max_freq;
+
+ if (new_uc != current_uc)
+ update = true;
+ else if ((user_config_updated) && (new_uc == UX500_UC_USER))
+ update = true;
+
+ pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
+ __func__, new_uc, current_uc, update);
+
+ if (!update)
+ goto exit;
+
+ /* Cpu hotplug */
+ if (!(usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() > 1))
+ cpu_down(1);
+ else if ((usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() < 2))
+ cpu_up(1);
+
+ /* Cpu freq */
+ err = cpufreq_get_policy(&policy, 0);
+ if (err)
+ pr_err("usecase-gov: get cpufreq policy failed\n");
+
+ /* If requirement is 0, use current policy value */
+ min_freq = usecase_conf[new_uc].min_freq ?
+ usecase_conf[new_uc].min_freq : policy.min;
+
+ max_freq = usecase_conf[new_uc].max_freq ?
+ usecase_conf[new_uc].max_freq : policy.max;
+
+ /*
+	 * The cpufreq framework does not allow a frequency change if
+	 * "current min freq" > "new max freq" or
+	 * "current max freq" < "new min freq".
+	 * Hence the intermediate steps below.
+ */
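+	/*
+	 * Worked example (frequencies in kHz, purely illustrative): moving
+	 * from a current range of [800000, 1000000] to the "auto" range
+	 * [200000, 400000]: policy.min (800000) > new max (400000), so
+	 * first lower the min with (200000, 1000000), then apply the
+	 * final (200000, 400000).
+	 */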
+ if (policy.min > max_freq) {
+ err = cpufreq_update_freq(0, min_freq, policy.max);
+ if (err)
+ pr_err("usecase-gov: update min cpufreq failed\n");
+ }
+ if (policy.max < min_freq) {
+ err = cpufreq_update_freq(0, policy.min, max_freq);
+ if (err)
+ pr_err("usecase-gov: update max cpufreq failed\n");
+ }
+
+ err = cpufreq_update_freq(0, min_freq, max_freq);
+ if (err)
+ pr_err("usecase-gov: update min/max cpufreq failed\n");
+
+ /* Cpu idle */
+ cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);
+
+ /* L2 prefetch */
+ if (usecase_conf[new_uc].l2_prefetch_en)
+ outer_prefetch_enable();
+ else
+ outer_prefetch_disable();
+
+ /* Force cpuidle state */
+ cpuidle_force_state(usecase_conf[new_uc].forced_state);
+
+ /* QOS override */
+ prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);
+
+ current_uc = new_uc;
+
+exit:
+	/* It's OK to clear even if new_uc != UX500_UC_USER */
+ user_config_updated = false;
+}
+
+void usecase_update_governor_state(void)
+{
+ bool cancel_work = false;
+
+ mutex_lock(&usecase_mutex);
+
+ if (uc_master_enable && (usecase_conf[UX500_UC_AUTO].enable ||
+ usecase_conf[UX500_UC_USER].enable)) {
+ /*
+ * Usecases are enabled. If we are in early suspend put
+ * governor to work.
+ */
+ if (is_early_suspend && !is_work_scheduled) {
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+ is_work_scheduled = true;
+ } else if (!is_early_suspend && is_work_scheduled) {
+ /* Exiting from early suspend. */
+ cancel_work = true;
+ }
+
+ } else if (is_work_scheduled) {
+ /* No usecase enabled or governor is not enabled. */
+ cancel_work = true;
+ }
+
+ if (cancel_work) {
+ cancel_delayed_work_sync(&work_usecase);
+ is_work_scheduled = false;
+
+ /* Set the default settings before exiting. */
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&usecase_mutex);
+
+}
+
+/*
+ * Start the periodic load measurement (every CPULOAD_MEAS_DELAY ms) to
+ * determine whether one CPU can be unplugged. To avoid corrupting the
+ * measurement, the first load average is not done in this early suspend call.
+ */
+static void usecase_earlysuspend_callback(struct early_suspend *h)
+{
+ init_cpu_load_trend();
+
+ is_early_suspend = true;
+
+ usecase_update_governor_state();
+}
+
+/* Stop measurement; called at LCD late resume */
+static void usecase_lateresume_callback(struct early_suspend *h)
+{
+ is_early_suspend = false;
+
+ usecase_update_governor_state();
+}
+
+static void delayed_usecase_work(struct work_struct *work)
+{
+ unsigned long avg, load, trend, balance;
+ bool inc_perf = false;
+ bool dec_perf = false;
+ u32 irqs_per_s;
+
+ /* determine loadavg */
+ avg = determine_loadavg();
+ hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
+ avg, lower_threshold, upper_threshold);
+
+ /* determine instant load */
+ load = determine_cpu_load();
+ hp_printk("cpu instant load = %lu max %lu\n", load, max_instant);
+
+ /* determine load trend */
+ trend = determine_cpu_load_trend();
+ hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
+ trend, min_trend, trend_unbalance);
+
+ /* determine load balancing */
+ balance = determine_cpu_balance_trend();
+ hp_printk("load balancing trend = %lu min %lu\n",
+ balance, max_unbalance);
+
+ irqs_per_s = get_num_interrupts_per_s();
+
+	/* Don't let the configuration change in the middle of our calculations. */
+ mutex_lock(&usecase_mutex);
+
+ /* detect "instant" load increase */
+ if (load > max_instant || irqs_per_s > exit_irq_per_s) {
+ inc_perf = true;
+ } else if (!usecase_conf[UX500_UC_USER].enable &&
+ usecase_conf[UX500_UC_AUTO].enable) {
+ /* detect high loadavg use case */
+ if (avg > upper_threshold)
+ inc_perf = true;
+ /* detect idle use case */
+ else if (trend < min_trend)
+ dec_perf = true;
+ /* detect unbalanced low cpu load use case */
+ else if ((balance > max_unbalance) && (trend < trend_unbalance))
+ dec_perf = true;
+ /* detect low loadavg use case */
+ else if (avg < lower_threshold)
+ dec_perf = true;
+ /* All user use cases disabled, current load not triggering
+ * any change.
+ */
+ else if (user_config_updated)
+ dec_perf = true;
+ } else {
+ dec_perf = true;
+ }
+
+ /*
+ * set_cpu_config() will not update the config unless it has been
+ * changed.
+ */
+ if (dec_perf) {
+ if (usecase_conf[UX500_UC_USER].enable)
+ set_cpu_config(UX500_UC_USER);
+ else if (usecase_conf[UX500_UC_AUTO].enable)
+ set_cpu_config(UX500_UC_AUTO);
+ } else if (inc_perf) {
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&usecase_mutex);
+
+	/* reprogram the scheduled work */
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+
+}
+
+static struct dentry *usecase_dir;
+
+#ifdef CONFIG_DEBUG_FS
+#define define_set(_name) \
+static ssize_t set_##_name(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ int err; \
+ long unsigned i; \
+ \
+ err = kstrtoul_from_user(user_buf, count, 0, &i); \
+ \
+ if (err) \
+ return err; \
+ \
+ _name = i; \
+ hp_printk("New value : %lu\n", _name); \
+ \
+ return count; \
+}
+
+define_set(upper_threshold);
+define_set(lower_threshold);
+define_set(max_unbalance);
+define_set(trend_unbalance);
+define_set(min_trend);
+define_set(max_instant);
+define_set(debug);
+
+#define define_print(_name) \
+static int print_##_name(struct seq_file *s, void *p) \
+{ \
+ return seq_printf(s, "%lu\n", _name); \
+}
+
+define_print(upper_threshold);
+define_print(lower_threshold);
+define_print(max_unbalance);
+define_print(trend_unbalance);
+define_print(min_trend);
+define_print(max_instant);
+define_print(debug);
+
+#define define_open(_name) \
+static int open_##_name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, print_##_name, inode->i_private); \
+}
+
+define_open(upper_threshold);
+define_open(lower_threshold);
+define_open(max_unbalance);
+define_open(trend_unbalance);
+define_open(min_trend);
+define_open(max_instant);
+define_open(debug);
+
+#define define_dbg_file(_name) \
+static const struct file_operations fops_##_name = { \
+ .open = open_##_name, \
+ .write = set_##_name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+}; \
+static struct dentry *file_##_name;
+
+define_dbg_file(upper_threshold);
+define_dbg_file(lower_threshold);
+define_dbg_file(max_unbalance);
+define_dbg_file(trend_unbalance);
+define_dbg_file(min_trend);
+define_dbg_file(max_instant);
+define_dbg_file(debug);
+
+struct dbg_file {
+ struct dentry **file;
+ const struct file_operations *fops;
+ const char *name;
+};
+
+#define define_dbg_entry(_name) \
+{ \
+ .file = &file_##_name, \
+ .fops = &fops_##_name, \
+ .name = #_name \
+}
+
+static struct dbg_file debug_entry[] = {
+ define_dbg_entry(upper_threshold),
+ define_dbg_entry(lower_threshold),
+ define_dbg_entry(max_unbalance),
+ define_dbg_entry(trend_unbalance),
+ define_dbg_entry(min_trend),
+ define_dbg_entry(max_instant),
+ define_dbg_entry(debug),
+};
+
+static int setup_debugfs(void)
+{
+ int i;
+ usecase_dir = debugfs_create_dir("usecase", NULL);
+
+ if (IS_ERR_OR_NULL(usecase_dir))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(debug_entry); i++) {
+ if (IS_ERR_OR_NULL(debugfs_create_file(debug_entry[i].name,
+ S_IWUGO | S_IRUGO,
+ usecase_dir,
+ NULL,
+ debug_entry[i].fops)))
+ goto fail;
+ }
+
+ if (IS_ERR_OR_NULL(debugfs_create_u32("exit_irq_per_s",
+ S_IWUGO | S_IRUGO, usecase_dir,
+ &exit_irq_per_s)))
+ goto fail;
+ return 0;
+fail:
+ debugfs_remove_recursive(usecase_dir);
+ return -EINVAL;
+}
+#else
+static int setup_debugfs(void)
+{
+ return 0;
+}
+#endif
+
+static void usecase_update_user_config(void)
+{
+ int i;
+ bool config_enable = false;
+ struct usecase_config *user_conf = &usecase_conf[UX500_UC_USER];
+
+ mutex_lock(&usecase_mutex);
+
+ user_conf->max_freq = 0;
+ user_conf->min_freq = 0;
+ user_conf->cpuidle_multiplier = 0;
+ user_conf->second_cpu_online = false;
+ user_conf->l2_prefetch_en = false;
+ user_conf->forced_state = cpuidle_deepest_state;
+ user_conf->vc_override = true; /* A single false will clear it. */
+
+	/* Don't include the Auto and Normal modes in this */
+ for (i = (UX500_UC_AUTO + 1); i < UX500_UC_USER; i++) {
+ if (!usecase_conf[i].enable)
+ continue;
+
+ config_enable = true;
+
+ if (usecase_conf[i].max_freq > user_conf->max_freq)
+ user_conf->max_freq = usecase_conf[i].max_freq;
+ /* It's the highest min freq requirement that should be used */
+ if (usecase_conf[i].min_freq > user_conf->min_freq)
+ user_conf->min_freq = usecase_conf[i].min_freq;
+
+ if (usecase_conf[i].cpuidle_multiplier >
+ user_conf->cpuidle_multiplier)
+ user_conf->cpuidle_multiplier =
+ usecase_conf[i].cpuidle_multiplier;
+
+ user_conf->second_cpu_online |=
+ usecase_conf[i].second_cpu_online;
+
+ user_conf->l2_prefetch_en |=
+ usecase_conf[i].l2_prefetch_en;
+
+ /* Take the shallowest state. */
+ if (usecase_conf[i].forced_state < user_conf->forced_state)
+ user_conf->forced_state = usecase_conf[i].forced_state;
+
+ /* Only override QOS if all enabled configurations are
+ * requesting it.
+ */
+ if (!usecase_conf[i].vc_override)
+ user_conf->vc_override = false;
+ }
+
+ user_conf->enable = config_enable;
+ user_config_updated = true;
+
+ mutex_unlock(&usecase_mutex);
+}
+
+struct usecase_devclass_attr {
+ struct sysdev_class_attribute class_attr;
+ u32 index;
+};
+
+/* One for each usecase except "user" + current + enable */
+#define UX500_NUM_SYSFS_NODES (UX500_UC_USER + 2)
+#define UX500_CURRENT_NODE_INDEX (UX500_NUM_SYSFS_NODES - 1)
+#define UX500_ENABLE_NODE_INDEX (UX500_NUM_SYSFS_NODES - 2)
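+/*
+ * With the enum above this works out as follows: UX500_UC_USER is 4, so
+ * there are 6 nodes in total: "normal", "auto", "voice-call" and
+ * "low-power-audio" (indices 0-3), plus "enable" (index 4) and
+ * "current" (index 5).
+ */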
+
+static struct usecase_devclass_attr usecase_dc_attr[UX500_NUM_SYSFS_NODES];
+
+static struct attribute *dbs_attributes[UX500_NUM_SYSFS_NODES + 1] = {NULL};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "usecase",
+};
+
+static ssize_t show_current(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ enum ux500_uc display_uc = (current_uc == UX500_UC_MAX) ?
+ UX500_UC_NORMAL : current_uc;
+
+ return sprintf(buf, "max_freq: %ld\n"
+ "min_freq: %ld\n"
+ "cpuidle_multiplier: %ld\n"
+ "second_cpu_online: %s\n"
+ "l2_prefetch_en: %s\n"
+ "forced_state: %d\n"
+ "vc_override: %s\n",
+ usecase_conf[display_uc].max_freq,
+ usecase_conf[display_uc].min_freq,
+ usecase_conf[display_uc].cpuidle_multiplier,
+ usecase_conf[display_uc].second_cpu_online ? "true" : "false",
+ usecase_conf[display_uc].l2_prefetch_en ? "true" : "false",
+ usecase_conf[display_uc].forced_state,
+ usecase_conf[display_uc].vc_override ? "true" : "false");
+}
+
+static ssize_t show_enable(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", uc_master_enable);
+}
+
+static ssize_t store_enable(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ uc_master_enable = (bool) input;
+
+ usecase_update_governor_state();
+
+ return count;
+}
+
+static ssize_t show_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ return sprintf(buf, "%u\n",
+ usecase_conf[uattr->index].enable);
+}
+
+static ssize_t store_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ ret = sscanf(buf, "%u", &input);
+
+	/* Normal mode can't be changed. */
+ if ((ret != 1) || (uattr->index == 0))
+ return -EINVAL;
+
+ usecase_conf[uattr->index].enable = (bool)input;
+
+ usecase_update_user_config();
+
+ usecase_update_governor_state();
+
+ return count;
+}
+
+static int usecase_sysfs_init(void)
+{
+ int err;
+ int i;
+
+ /* Last two nodes are not based on usecase configurations */
+ for (i = 0; i < (UX500_NUM_SYSFS_NODES - 2); i++) {
+ usecase_dc_attr[i].class_attr.attr.name = usecase_conf[i].name;
+ usecase_dc_attr[i].class_attr.attr.mode = 0644;
+ usecase_dc_attr[i].class_attr.show = show_dc_attr;
+ usecase_dc_attr[i].class_attr.store = store_dc_attr;
+ usecase_dc_attr[i].index = i;
+
+ dbs_attributes[i] = &(usecase_dc_attr[i].class_attr.attr);
+ }
+
+ /* sysfs current */
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr.name =
+ "current";
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr.mode =
+ 0644;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.show =
+ show_current;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.store =
+ NULL;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].index =
+ 0;
+ dbs_attributes[UX500_CURRENT_NODE_INDEX] =
+ &(usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr);
+
+ /* sysfs enable */
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr.name =
+ "enable";
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr.mode =
+ 0644;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.show =
+ show_enable;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.store =
+ store_enable;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].index =
+ 0;
+ dbs_attributes[UX500_ENABLE_NODE_INDEX] =
+ &(usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr);
+
+ err = sysfs_create_group(&(cpu_sysdev_class.kset.kobj),
+ &dbs_attr_group);
+ if (err)
+ pr_err("usecase-gov: sysfs_create_group"
+ " failed with error = %d\n", err);
+
+ return err;
+}
+
+static void usecase_cpuidle_init(void)
+{
+ int max_states;
+ int i;
+ struct cstate *state = ux500_ci_get_cstates(&max_states);
+
+ for (i = 0; i < max_states; i++)
+ if ((state[i].APE == APE_OFF) && (state[i].ARM == ARM_RET))
+ break;
+
+ usecase_conf[UX500_UC_LPA].forced_state = i;
+
+ cpuidle_deepest_state = max_states - 1;
+}
+
+/* initialize devices */
+static int __init init_usecase_devices(void)
+{
+ int err;
+
+ pr_info("Use-case governor initialized\n");
+
+ /* add early_suspend callback */
+ usecase_early_suspend.level = 200;
+ usecase_early_suspend.suspend = usecase_earlysuspend_callback;
+ usecase_early_suspend.resume = usecase_lateresume_callback;
+ register_early_suspend(&usecase_early_suspend);
+
+	/* register the deferrable delayed work */
+ INIT_DELAYED_WORK_DEFERRABLE(&work_usecase,
+ delayed_usecase_work);
+
+ init_cpu_load_trend();
+
+ err = setup_debugfs();
+ if (err)
+ goto error;
+ err = usecase_sysfs_init();
+ if (err)
+ goto error2;
+
+ usecase_cpuidle_init();
+
+ return 0;
+error2:
+ debugfs_remove_recursive(usecase_dir);
+error:
+ unregister_early_suspend(&usecase_early_suspend);
+ return err;
+}
+
+device_initcall(init_usecase_devices);
diff --git a/arch/arm/mach-ux500/prcmu-debug.c b/arch/arm/mach-ux500/prcmu-debug.c
new file mode 100644
index 00000000000..61d19c0e1e0
--- /dev/null
+++ b/arch/arm/mach-ux500/prcmu-debug.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Martin Persson for ST-Ericsson
+ * Etienne Carriere <etienne.carriere@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <mach/hardware.h>
+
+#define MAX_STATES 5
+#define MAX_NAMELEN 16
+
+struct state_history {
+ ktime_t start;
+ u32 state;
+ u32 counter[MAX_STATES];
+ u8 opps[MAX_STATES];
+ int max_states;
+ int req;
+ bool reqs[MAX_STATES];
+ ktime_t time[MAX_STATES];
+ int state_names[MAX_STATES];
+ char prefix[MAX_NAMELEN];
+ spinlock_t lock;
+};
+
+static struct state_history ape_sh = {
+ .prefix = "APE",
+ .req = PRCMU_QOS_APE_OPP,
+ .opps = {APE_50_OPP, APE_100_OPP},
+ .state_names = {50, 100},
+ .max_states = 2,
+};
+
+static struct state_history ddr_sh = {
+ .prefix = "DDR",
+ .req = PRCMU_QOS_DDR_OPP,
+ .opps = {DDR_25_OPP, DDR_50_OPP, DDR_100_OPP},
+ .state_names = {25, 50, 100},
+ .max_states = 3,
+};
+
+static struct state_history arm_sh = {
+ .prefix = "ARM",
+ .req = PRCMU_QOS_ARM_OPP,
+ .opps = {ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP},
+ .state_names = {25, 50, 100, 125},
+ .max_states = 4,
+};
+
+static int ape_voltage_count;
+
+static void log_set(struct state_history *sh, u8 opp)
+{
+ ktime_t now;
+ ktime_t dtime;
+ unsigned long flags;
+ int state;
+
+ now = ktime_get();
+ spin_lock_irqsave(&sh->lock, flags);
+
+ for (state = 0 ; sh->opps[state] != opp; state++)
+ ;
+ BUG_ON(state >= sh->max_states);
+
+ dtime = ktime_sub(now, sh->start);
+ sh->time[sh->state] = ktime_add(sh->time[sh->state], dtime);
+ sh->start = now;
+ sh->counter[sh->state]++;
+ sh->state = state;
+
+ spin_unlock_irqrestore(&sh->lock, flags);
+}
+
+void prcmu_debug_ape_opp_log(u8 opp)
+{
+ log_set(&ape_sh, opp);
+}
+
+void prcmu_debug_ddr_opp_log(u8 opp)
+{
+ log_set(&ddr_sh, opp);
+}
+
+void prcmu_debug_arm_opp_log(u8 opp)
+{
+ log_set(&arm_sh, opp);
+}
+
+static void log_reset(struct state_history *sh)
+{
+ unsigned long flags;
+ int i;
+
+ pr_info("reset\n");
+
+ spin_lock_irqsave(&sh->lock, flags);
+ for (i = 0; i < sh->max_states; i++) {
+ sh->counter[i] = 0;
+ sh->time[i] = ktime_set(0, 0);
+ }
+
+ sh->start = ktime_get();
+ spin_unlock_irqrestore(&sh->lock, flags);
+
+}
+
+static ssize_t ape_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ log_reset(&ape_sh);
+ return count;
+}
+
+static ssize_t ddr_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ log_reset(&ddr_sh);
+ return count;
+}
+
+static ssize_t arm_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ log_reset(&arm_sh);
+ return count;
+}
+
+static int log_print(struct seq_file *s, struct state_history *sh)
+{
+ int i;
+ unsigned long flags;
+ ktime_t total;
+ ktime_t dtime;
+ s64 t_ms;
+ s64 perc;
+ s64 total_ms;
+
+ spin_lock_irqsave(&sh->lock, flags);
+
+ dtime = ktime_sub(ktime_get(), sh->start);
+
+ total = dtime;
+
+ for (i = 0; i < sh->max_states; i++)
+ total = ktime_add(total, sh->time[i]);
+ total_ms = ktime_to_ms(total);
+
+ for (i = 0; i < sh->max_states; i++) {
+ ktime_t t = sh->time[i];
+ if (sh->state == i)
+ t = ktime_add(t, dtime);
+
+ t_ms = ktime_to_ms(t);
+ perc = 100 * t_ms;
+ do_div(perc, total_ms);
+
+ seq_printf(s, "%s OPP %d: # %u in %lld ms %d%%\n",
+ sh->prefix, sh->state_names[i],
+ sh->counter[i] + (int)(sh->state == i),
+ t_ms, (u32)perc);
+
+ }
+ spin_unlock_irqrestore(&sh->lock, flags);
+ return 0;
+}
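+/*
+ * Illustrative example of the accounting above: if, since the last reset,
+ * the APE has spent 900 ms at the 50% OPP and 100 ms at the 100% OPP,
+ * reading ape_stats prints one line per OPP with the transition count,
+ * the accumulated time in ms and the share in percent, roughly 90% and
+ * 10% respectively in this case.
+ */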
+
+static int ape_stats_print(struct seq_file *s, void *p)
+{
+ log_print(s, &ape_sh);
+ return 0;
+}
+
+static int ddr_stats_print(struct seq_file *s, void *p)
+{
+ log_print(s, &ddr_sh);
+ return 0;
+}
+
+static int arm_stats_print(struct seq_file *s, void *p)
+{
+ log_print(s, &arm_sh);
+ return 0;
+}
+
+static int opp_read(struct seq_file *s, void *p)
+{
+ int opp;
+
+ struct state_history *sh = (struct state_history *)s->private;
+
+ switch (sh->req) {
+ case PRCMU_QOS_DDR_OPP:
+ opp = prcmu_get_ddr_opp();
+ seq_printf(s, "%s (%d)\n",
+ (opp == DDR_100_OPP) ? "100%" :
+ (opp == DDR_50_OPP) ? "50%" :
+ (opp == DDR_25_OPP) ? "25%" :
+ "unknown", opp);
+ break;
+ case PRCMU_QOS_APE_OPP:
+ opp = prcmu_get_ape_opp();
+ seq_printf(s, "%s (%d)\n",
+ (opp == APE_100_OPP) ? "100%" :
+ (opp == APE_50_OPP) ? "50%" :
+ "unknown", opp);
+ break;
+ case PRCMU_QOS_ARM_OPP:
+ opp = prcmu_get_arm_opp();
+ seq_printf(s, "%s (%d)\n",
+ (opp == ARM_MAX_OPP) ? "max" :
+ (opp == ARM_MAX_FREQ100OPP) ? "max-freq100" :
+ (opp == ARM_100_OPP) ? "100%" :
+ (opp == ARM_50_OPP) ? "50%" :
+ (opp == ARM_EXTCLK) ? "25% (extclk)" :
+ "unknown", opp);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+}
+
+static ssize_t opp_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ long unsigned i;
+ int err;
+ struct state_history *sh = (struct state_history *)
+ ((struct seq_file *)file->private_data)->private;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &i);
+
+ if (err)
+ return err;
+
+ prcmu_qos_force_opp(sh->req, i);
+
+ pr_info("prcmu debug: forced OPP for %s to %d\n", sh->prefix, (int)i);
+
+ return count;
+}
+
+static int cpufreq_delay_read(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "%lu\n", prcmu_qos_get_cpufreq_opp_delay());
+}
+
+static int ape_voltage_read(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "This reference count only includes "
+ "requests via debugfs.\nCount: %d\n",
+ ape_voltage_count);
+}
+
+static ssize_t ape_voltage_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ long unsigned i;
+ int err;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &i);
+
+ if (err)
+ return err;
+
+ switch (i) {
+ case 0:
+ if (ape_voltage_count == 0)
+ pr_info("prcmu debug: reference count is already 0\n");
+ else {
+ err = prcmu_request_ape_opp_100_voltage(false);
+ if (err)
+ pr_err("prcmu debug: drop request failed\n");
+ else
+ ape_voltage_count--;
+ }
+ break;
+ case 1:
+ err = prcmu_request_ape_opp_100_voltage(true);
+ if (err)
+ pr_err("prcmu debug: request failed\n");
+ else
+ ape_voltage_count++;
+ break;
+ default:
+ pr_info("prcmu debug: value not equal to 0 or 1\n");
+ }
+ return count;
+}
+
+static ssize_t cpufreq_delay_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ long unsigned i;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &i);
+
+ if (err)
+ return err;
+
+ prcmu_qos_set_cpufreq_opp_delay(i);
+
+ pr_info("prcmu debug: changed delay between cpufreq change and QoS "
+ "requirement to %lu.\n", i);
+
+ return count;
+}
+
+/* These are only for u8500 */
+#define PRCM_AVS_BASE 0x2FC
+#define AVS_VBB_RET 0x0
+#define AVS_VBB_MAX_OPP 0x1
+#define AVS_VBB_100_OPP 0x2
+#define AVS_VBB_50_OPP 0x3
+#define AVS_VARM_MAX_OPP 0x4
+#define AVS_VARM_100_OPP 0x5
+#define AVS_VARM_50_OPP 0x6
+#define AVS_VARM_RET 0x7
+#define AVS_VAPE_100_OPP 0x8
+#define AVS_VAPE_50_OPP 0x9
+#define AVS_VMOD_100_OPP 0xA
+#define AVS_VMOD_50_OPP 0xB
+#define AVS_VSAFE 0xC
+#define AVS_SIZE 14
+
+static int avs_read(struct seq_file *s, void *p)
+{
+
+ u8 avs[AVS_SIZE];
+ void __iomem *tcdm_base;
+
+ if (cpu_is_u8500()) {
+ tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+
+ memcpy_fromio(avs, tcdm_base + PRCM_AVS_BASE, AVS_SIZE);
+
+ seq_printf(s, "VBB_RET : 0x%2x\n", avs[AVS_VBB_RET]);
+ seq_printf(s, "VBB_MAX_OPP : 0x%2x\n", avs[AVS_VBB_MAX_OPP]);
+ seq_printf(s, "VBB_100_OPP : 0x%2x\n", avs[AVS_VBB_100_OPP]);
+ seq_printf(s, "VBB_50_OPP : 0x%2x\n", avs[AVS_VBB_50_OPP]);
+ seq_printf(s, "VARM_MAX_OPP : 0x%2x\n", avs[AVS_VARM_MAX_OPP]);
+ seq_printf(s, "VARM_100_OPP : 0x%2x\n", avs[AVS_VARM_100_OPP]);
+ seq_printf(s, "VARM_50_OPP : 0x%2x\n", avs[AVS_VARM_50_OPP]);
+ seq_printf(s, "VARM_RET : 0x%2x\n", avs[AVS_VARM_RET]);
+ seq_printf(s, "VAPE_100_OPP : 0x%2x\n", avs[AVS_VAPE_100_OPP]);
+ seq_printf(s, "VAPE_50_OPP : 0x%2x\n", avs[AVS_VAPE_50_OPP]);
+ seq_printf(s, "VMOD_100_OPP : 0x%2x\n", avs[AVS_VMOD_100_OPP]);
+ seq_printf(s, "VMOD_50_OPP : 0x%2x\n", avs[AVS_VMOD_50_OPP]);
+ seq_printf(s, "VSAFE : 0x%2x\n", avs[AVS_VSAFE]);
+ } else {
+ seq_printf(s, "Only u8500 supported.\n");
+ }
+
+ return 0;
+}
+
+static int opp_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, opp_read, inode->i_private);
+}
+
+static int ape_stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, ape_stats_print, inode->i_private);
+}
+
+static int ddr_stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, ddr_stats_print, inode->i_private);
+}
+
+static int arm_stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, arm_stats_print, inode->i_private);
+}
+
+static int cpufreq_delay_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, cpufreq_delay_read, inode->i_private);
+}
+
+static int ape_voltage_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, ape_voltage_read, inode->i_private);
+}
+
+static int avs_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, avs_read, inode->i_private);
+}
+
+static const struct file_operations opp_fops = {
+ .open = opp_open_file,
+ .write = opp_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ape_stats_fops = {
+ .open = ape_stats_open_file,
+ .write = ape_stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ddr_stats_fops = {
+ .open = ddr_stats_open_file,
+ .write = ddr_stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations arm_stats_fops = {
+ .open = arm_stats_open_file,
+ .write = arm_stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations cpufreq_delay_fops = {
+ .open = cpufreq_delay_open_file,
+ .write = cpufreq_delay_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ape_voltage_fops = {
+ .open = ape_voltage_open_file,
+ .write = ape_voltage_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations avs_fops = {
+ .open = avs_open_file,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int setup_debugfs(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+
+ dir = debugfs_create_dir("prcmu", NULL);
+ if (IS_ERR_OR_NULL(dir))
+ goto fail;
+
+ file = debugfs_create_file("ape_stats", (S_IRUGO | S_IWUGO),
+ dir, NULL, &ape_stats_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ddr_stats", (S_IRUGO | S_IWUGO),
+ dir, NULL, &ddr_stats_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("arm_stats", (S_IRUGO | S_IWUGO),
+ dir, NULL, &arm_stats_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ape_opp", (S_IRUGO),
+ dir, (void *)&ape_sh,
+ &opp_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ddr_opp", (S_IRUGO),
+ dir, (void *)&ddr_sh,
+ &opp_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("arm_opp", (S_IRUGO),
+ dir, (void *)&arm_sh,
+ &opp_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("opp_cpufreq_delay", (S_IRUGO),
+ dir, NULL, &cpufreq_delay_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ape_voltage", (S_IRUGO),
+ dir, NULL, &ape_voltage_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("avs",
+ (S_IRUGO),
+ dir, NULL, &avs_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ return 0;
+fail:
+ if (!IS_ERR_OR_NULL(dir))
+ debugfs_remove_recursive(dir);
+
+ pr_err("prcmu debug: debugfs entry failed\n");
+ return -ENOMEM;
+}
+
+static __init int prcmu_debug_init(void)
+{
+ spin_lock_init(&ape_sh.lock);
+ spin_lock_init(&ddr_sh.lock);
+ spin_lock_init(&arm_sh.lock);
+ ape_sh.start = ktime_get();
+ ddr_sh.start = ktime_get();
+ arm_sh.start = ktime_get();
+ setup_debugfs();
+ return 0;
+}
+late_initcall(prcmu_debug_init);
diff --git a/arch/arm/mach-ux500/product.c b/arch/arm/mach-ux500/product.c
new file mode 100644
index 00000000000..5e8eba9b16a
--- /dev/null
+++ b/arch/arm/mach-ux500/product.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Jens Wiklander <jens.wiklander@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/tee.h>
+#include <linux/module.h>
+#include <mach/hardware.h>
+
+#define STATIC_TEE_TA_START_LOW 0xBC765EDE
+#define STATIC_TEE_TA_START_MID 0x6724
+#define STATIC_TEE_TA_START_HIGH 0x11DF
+#define STATIC_TEE_TA_START_CLOCKSEQ \
+ {0x8E, 0x12, 0xEC, 0xDB, 0xDF, 0xD7, 0x20, 0x85}
+
+#define U5500_PRCMU_DBG_PWRCTRL (U5500_PRCMU_BASE + 0x4AC)
+#define PRCMU_DBG_PWRCTRL_A9DBGCLKEN (1 << 4)
+
+static struct tee_product_config product_config;
+
+bool ux500_jtag_enabled(void)
+{
+#ifdef CONFIG_UX500_DEBUG_NO_LAUTERBACH
+ return false;
+#else
+ if (cpu_is_u5500())
+ return readl_relaxed(__io_address(U5500_PRCMU_DBG_PWRCTRL))
+ & PRCMU_DBG_PWRCTRL_A9DBGCLKEN;
+
+ if (cpu_is_u8500())
+ return (product_config.rt_flags & TEE_RT_FLAGS_JTAG_ENABLED) ==
+ TEE_RT_FLAGS_JTAG_ENABLED;
+
+ return true;
+#endif
+}
+
+static int __init product_detect(void)
+{
+ int err;
+ int origin_err;
+ struct tee_operation operation = { { { 0 } } };
+ struct tee_context context;
+ struct tee_session session;
+
+ /* Selects trustzone application needed for the job. */
+ struct tee_uuid static_uuid = {
+ STATIC_TEE_TA_START_LOW,
+ STATIC_TEE_TA_START_MID,
+ STATIC_TEE_TA_START_HIGH,
+ STATIC_TEE_TA_START_CLOCKSEQ,
+ };
+
+ if (cpu_is_u5500())
+ return -ENODEV;
+
+ err = teec_initialize_context(NULL, &context);
+ if (err) {
+ pr_err("ux500-product: unable to initialize tee context,"
+ " err = %d\n", err);
+ err = -EINVAL;
+ goto error0;
+ }
+
+ err = teec_open_session(&context, &session, &static_uuid,
+ TEEC_LOGIN_PUBLIC, NULL, NULL, &origin_err);
+ if (err) {
+ pr_err("ux500-product: unable to open tee session,"
+ " tee error = %d, origin error = %d\n",
+ err, origin_err);
+ err = -EINVAL;
+ goto error1;
+ }
+
+ operation.shm[0].buffer = &product_config;
+ operation.shm[0].size = sizeof(product_config);
+ operation.shm[0].flags = TEEC_MEM_OUTPUT;
+ operation.flags = TEEC_MEMREF_0_USED;
+
+ err = teec_invoke_command(&session,
+ TEE_STA_GET_PRODUCT_CONFIG,
+ &operation, &origin_err);
+ if (err) {
+ pr_err("ux500-product: fetching product settings failed, err=%d",
+ err);
+ err = -EINVAL;
+ goto error1;
+ }
+
+ pr_info("ux500-product: JTAG is %s\n",
+ ux500_jtag_enabled() ? "enabled" : "disabled");
+error1:
+ (void) teec_finalize_context(&context);
+error0:
+ return err;
+}
+device_initcall(product_detect);
+
diff --git a/arch/arm/mach-ux500/product.h b/arch/arm/mach-ux500/product.h
new file mode 100644
index 00000000000..502eff4df14
--- /dev/null
+++ b/arch/arm/mach-ux500/product.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Jens Wiklander <jens.wiklander@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef UX500_PRODUCT_H
+#define UX500_PRODUCT_H
+
+#ifdef CONFIG_TEE_UX500
+
+bool ux500_jtag_enabled(void);
+
+#else
+
+static inline bool ux500_jtag_enabled(void)
+{
+ return true;
+}
+
+#endif
+#endif
diff --git a/arch/arm/mach-ux500/reboot_reasons.c b/arch/arm/mach-ux500/reboot_reasons.c
new file mode 100644
index 00000000000..365b0c02e16
--- /dev/null
+++ b/arch/arm/mach-ux500/reboot_reasons.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Rickard Evertsson <rickard.evertsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Use this file to customize your reboot / sw reset reasons. Add, remove or
+ * modify reasons in reboot_reasons[].
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <mach/reboot_reasons.h>
+
+struct reboot_reason reboot_reasons[] = {
+ {"crash", SW_RESET_CRASH},
+ {"factory-reset", SW_RESET_FACTORY_RESET},
+ {"recovery", SW_RESET_RECOVERY},
+ {"charging", SW_RESET_CHARGING},
+ {"coldstart", SW_RESET_COLDSTART},
+ {"none", SW_RESET_NO_ARGUMENT}, /* Normal Boot */
+};
+
+unsigned int reboot_reasons_size = ARRAY_SIZE(reboot_reasons);
+
+/*
+ * The reboot reason string can be 255 characters long and the memory
+ * in which we save the sw reset reason is 2 bytes. Therefore we need to
+ * convert the string into a 16 bit pattern.
+ *
+ * See file reboot_reasons.h for conversion.
+ */
+u16 reboot_reason_code(const char *cmd)
+{
+ int i;
+
+ if (cmd == NULL) {
+ if (oops_in_progress) {
+ /* if we're in an oops assume it's a crash */
+ return SW_RESET_CRASH;
+ } else {
+ /* normal reboot w/o argument */
+ return SW_RESET_NO_ARGUMENT;
+ }
+ }
+
+ /* Search through reboot reason list */
+ for (i = 0; i < reboot_reasons_size; i++) {
+ if (!strcmp(reboot_reasons[i].reason, cmd))
+ return reboot_reasons[i].code;
+ }
+
+ /* No valid reboot reason found */
+ return SW_RESET_NO_ARGUMENT;
+}
+
+/*
+ * The saved sw reset reason is a 2 byte code that is translated into
+ * a reboot reason string which is up to 255 characters long by this
+ * function.
+ *
+ * See file reboot_reasons.h for conversion.
+ */
+const char *reboot_reason_string(u16 code)
+{
+ int i;
+
+ /* Search through reboot reason list */
+ for (i = 0; i < reboot_reasons_size; i++) {
+ if (reboot_reasons[i].code == code)
+ return reboot_reasons[i].reason;
+ }
+
+ /* No valid reboot reason code found */
+ return "unknown";
+}
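As context, a short hypothetical usage sketch of the two helpers defined above: the restart path converts the user-supplied reboot command into the 16-bit code that survives the reset, and the boot path converts it back into a string. The callers themselves live elsewhere in mach-ux500.

/* Illustrative only; not part of this patch. */
u16 code = reboot_reason_code("recovery");	/* -> SW_RESET_RECOVERY */
const char *why = reboot_reason_string(code);	/* -> "recovery" */
pr_info("rebooting, reason: %s (0x%04x)\n", why, code);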
diff --git a/arch/arm/mach-ux500/regulator-u5500.h b/arch/arm/mach-ux500/regulator-u5500.h
new file mode 100644
index 00000000000..cf3eeed9366
--- /dev/null
+++ b/arch/arm/mach-ux500/regulator-u5500.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __REGULATOR_U5500_H
+#define __REGULATOR_U5500_H
+
+enum u5500_regulator_id {
+ U5500_REGULATOR_VAPE,
+ U5500_REGULATOR_SWITCH_SGA,
+ U5500_REGULATOR_SWITCH_HVA,
+ U5500_REGULATOR_SWITCH_SIA,
+ U5500_REGULATOR_SWITCH_DISP,
+ U5500_REGULATOR_SWITCH_ESRAM12,
+ U5500_NUM_REGULATORS
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/sensors1p.c b/arch/arm/mach-ux500/sensors1p.c
new file mode 100644
index 00000000000..e7f4642b1d9
--- /dev/null
+++ b/arch/arm/mach-ux500/sensors1p.c
@@ -0,0 +1,298 @@
+
+/*
+ * Copyright (C) 2009-2010 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Simple userspace interface for
+ * Proximity Sensor Osram SFH 7741 and HAL switch Samsung HED54XXU11
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * This driver only exists to make Android happy. It is not meant
+ * for mainline.
+ */
+
+
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include <mach/sensors1p.h>
+
+struct sensor {
+ struct regulator *regulator;
+ int pin;
+ int startup_time;
+ int active;
+ u64 when_enabled;
+};
+
+struct sensors1p {
+ struct sensor hal;
+ struct sensor proximity;
+};
+
+static int sensors1p_power_write(struct device *dev,
+ struct sensor *s, const char *buf)
+{
+ int val;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (val != s->active) {
+ if (val) {
+ regulator_enable(s->regulator);
+ s->when_enabled = get_jiffies_64() +
+ msecs_to_jiffies(s->startup_time);
+ } else
+ regulator_disable(s->regulator);
+ }
+ s->active = val;
+
+ return strnlen(buf, PAGE_SIZE);
+}
+
+static ssize_t sensors1p_sysfs_hal_active_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensors1p *s = platform_get_drvdata(container_of(dev,
+ struct platform_device,
+ dev));
+
+ return sensors1p_power_write(dev, &s->hal, buf);
+}
+
+static ssize_t sensors1p_sysfs_proximity_active_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct sensors1p *s = platform_get_drvdata(container_of(dev,
+ struct platform_device,
+ dev));
+
+ return sensors1p_power_write(dev, &s->proximity, buf);
+}
+
+static ssize_t sensors1p_sysfs_hal_active_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensors1p *s = platform_get_drvdata(container_of(dev,
+ struct platform_device,
+ dev));
+ return sprintf(buf, "%d", s->hal.active);
+}
+
+static ssize_t sensors1p_sysfs_proximity_active_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensors1p *s = platform_get_drvdata(container_of(dev,
+ struct platform_device,
+ dev));
+ return sprintf(buf, "%d", s->proximity.active);
+}
+
+static int sensors1p_read(struct device *dev, struct sensor *s, char *buf)
+{
+ int ret;
+
+ if (!s->active)
+ return -EINVAL;
+
+ /*
+ * Only wait if read() is called before the sensor is up and running.
+ * Since jiffies wraps, always sleep the maximum time.
+ */
+ if (time_before64(get_jiffies_64(), s->when_enabled))
+ mdelay(s->startup_time);
+
+ /* For some odd reason, setting direction in the probe function fails */
+ ret = gpio_direction_input(s->pin);
+
+ if (ret)
+ dev_err(dev, "Failed to set GPIO pin %d to input.\n", s->pin);
+ else
+ ret = gpio_get_value(s->pin);
+
+ return sprintf(buf, "%d", ret);
+}
+
+static ssize_t sensors1p_sysfs_hal_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensors1p *s = platform_get_drvdata(container_of(dev,
+ struct platform_device,
+ dev));
+ return sensors1p_read(dev, &s->hal, buf);
+}
+
+static ssize_t sensors1p_sysfs_proximity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensors1p *s = platform_get_drvdata(container_of(dev,
+ struct platform_device,
+ dev));
+ return sensors1p_read(dev, &s->proximity, buf);
+}
+
+static DEVICE_ATTR(proximity_activate, 0666,
+ sensors1p_sysfs_proximity_active_get,
+ sensors1p_sysfs_proximity_active_set);
+static DEVICE_ATTR(hal_activate, 0666,
+ sensors1p_sysfs_hal_active_get,
+ sensors1p_sysfs_hal_active_set);
+static DEVICE_ATTR(proximity, 0444, sensors1p_sysfs_proximity_show, NULL);
+static DEVICE_ATTR(hal, 0444, sensors1p_sysfs_hal_show, NULL);
+
+static struct attribute *sensors1p_attrs[] = {
+ &dev_attr_proximity_activate.attr,
+ &dev_attr_hal_activate.attr,
+ &dev_attr_proximity.attr,
+ &dev_attr_hal.attr,
+ NULL,
+};
+
+static struct attribute_group sensors1p_attr_group = {
+ .name = NULL,
+ .attrs = sensors1p_attrs,
+};
+
+static int __init sensors1p_probe(struct platform_device *pdev)
+{
+ int err = -EINVAL;
+ struct sensors1p_config *c;
+ struct sensors1p *s = NULL;
+
+ if (!pdev)
+ goto out;
+
+ c = pdev->dev.platform_data;
+
+ if (c == NULL) {
+ dev_err(&pdev->dev, "Error: Missconfigured.\n");
+ goto out;
+ }
+
+ s = kzalloc(sizeof(struct sensors1p), GFP_KERNEL);
+
+ if (s == NULL) {
+ dev_err(&pdev->dev,
+ "Could not allocate struct memory!\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ s->hal.pin = c->hal.pin;
+ err = gpio_request(c->hal.pin, "hal sensor");
+ if (err < 0) {
+ dev_err(&pdev->dev, "gpio_request failed with err: %d", err);
+ goto err_hal_gpio;
+ }
+
+ s->proximity.pin = c->proximity.pin;
+ err = gpio_request(c->proximity.pin, "proximity sensor");
+ if (err < 0) {
+ dev_err(&pdev->dev, "gpio_request failed with err: %d", err);
+ goto err_proximity_gpio;
+ }
+
+ s->hal.startup_time = c->hal.startup_time;
+ s->proximity.startup_time = c->proximity.startup_time;
+
+ s->hal.regulator = regulator_get(&pdev->dev, c->hal.regulator);
+
+ if (IS_ERR(s->hal.regulator)) {
+ dev_err(&pdev->dev, "regulator_get(\"%s\") failed.\n",
+ c->hal.regulator);
+ err = PTR_ERR(s->hal.regulator);
+ goto err_hal_reg;
+ }
+ s->proximity.regulator = regulator_get(&pdev->dev,
+ c->proximity.regulator);
+
+ if (IS_ERR(s->proximity.regulator)) {
+ dev_err(&pdev->dev, "regulator_get(\"%s\") failed.\n",
+ c->proximity.regulator);
+ err = PTR_ERR(s->proximity.regulator);
+ goto err_proximity_reg;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &sensors1p_attr_group);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create sysfs entries.\n");
+ goto err_sysfs;
+ }
+
+ platform_set_drvdata(pdev, s);
+
+ return 0;
+
+err_sysfs:
+ regulator_put(s->proximity.regulator);
+err_proximity_reg:
+ regulator_put(s->hal.regulator);
+err_hal_reg:
+ gpio_free(s->proximity.pin);
+err_proximity_gpio:
+ gpio_free(s->hal.pin);
+err_hal_gpio:
+ kfree(s);
+out:
+ return err;
+}
+
+static int __exit sensors1p_remove(struct platform_device *pdev)
+{
+ struct sensors1p *s = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &sensors1p_attr_group);
+ gpio_free(s->hal.pin);
+ gpio_free(s->proximity.pin);
+ regulator_put(s->hal.regulator);
+ regulator_put(s->proximity.regulator);
+ kfree(s);
+ return 0;
+}
+
+static struct platform_driver sensors1p_driver = {
+ .remove = __exit_p(sensors1p_remove),
+ .driver = {
+ .name = "sensors1p",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sensors1p_init(void)
+{
+ return platform_driver_probe(&sensors1p_driver, sensors1p_probe);
+}
+
+static void __exit sensors1p_exit(void)
+{
+ platform_driver_unregister(&sensors1p_driver);
+}
+
+late_initcall(sensors1p_init);
+module_exit(sensors1p_exit);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_DESCRIPTION("One pin gpio sensors driver (Proximity+HAL)");
+MODULE_LICENSE("GPLv2");
diff --git a/arch/arm/mach-ux500/tee_service_svp.c b/arch/arm/mach-ux500/tee_service_svp.c
new file mode 100644
index 00000000000..aa65dd961a0
--- /dev/null
+++ b/arch/arm/mach-ux500/tee_service_svp.c
@@ -0,0 +1,66 @@
+/*
+ * TEE service to handle the calls to trusted applications in SVP.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/tee.h>
+#include <linux/err.h>
+#include "mach/tee_ta_start_modem.h"
+
+static int cmp_uuid_start_modem(struct tee_uuid *uuid)
+{
+ int ret = -EINVAL;
+
+ if (uuid == NULL)
+ return -EINVAL;
+
+ /* This handles the calls to the TA for starting the modem */
+ if ((uuid->timeLow == UUID_TEE_TA_START_MODEM_LOW) &&
+ (uuid->timeMid == UUID_TEE_TA_START_MODEM_MID) &&
+ (uuid->timeHiAndVersion == UUID_TEE_TA_START_MODEM_HIGH)) {
+
+ u8 clockSeqAndNode[TEE_UUID_CLOCK_SIZE] =
+ UUID_TEE_TA_START_MODEM_CLOCKSEQ;
+
+ ret = memcmp(uuid->clockSeqAndNode, clockSeqAndNode,
+ TEE_UUID_CLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+int call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ int ret = 0;
+
+ if (ts == NULL)
+ return -EINVAL;
+
+ if (cmp_uuid_start_modem(ts->uuid))
+ return -EINVAL;
+
+ switch (ts->cmd) {
+ case COMMAND_ID_START_MODEM:
+ ret = tee_ta_start_modem((struct tee_ta_start_modem *)
+ ts->op);
+ if (ret) {
+ ts->err = TEED_ERROR_GENERIC;
+ ts->origin = TEED_ORIGIN_TEE_APPLICATION;
+ pr_err("tee_ta_start_modem() failed!\n");
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* TODO: to handle more trusted applications. */
+
+ return ret;
+}
diff --git a/arch/arm/mach-ux500/tee_ta_start_modem_svp.c b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c
new file mode 100644
index 00000000000..12337b93154
--- /dev/null
+++ b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c
@@ -0,0 +1,56 @@
+/*
+ * Trusted application for starting the modem.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <mach/hardware.h>
+
+#include "mach/tee_ta_start_modem.h"
+
+static int reset_modem(unsigned long modem_start_addr)
+{
+ void __iomem *base = ioremap(U5500_ACCCON_BASE_SEC, 0x2FF);
+ if (!base)
+ return -ENOMEM;
+
+ pr_info("[%s] Setting modem start address!\n", __func__);
+ writel(modem_start_addr,
+ base + (U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET/sizeof(uint32_t)));
+
+ pr_info("[%s] resetting the modem!\n", __func__);
+ writel(1, base + (U5500_ACCCON_ACC_CPU_CTRL_OFFSET/sizeof(uint32_t)));
+
+ iounmap(base);
+
+ return 0;
+}
+
+int tee_ta_start_modem(struct tee_ta_start_modem *data)
+{
+ int ret = 0;
+ struct elfhdr *elfhdr;
+ void __iomem *vaddr;
+
+ vaddr = ioremap((unsigned long)data->access_image_descr.elf_hdr,
+ sizeof(struct elfhdr));
+ if (!vaddr)
+ return -ENOMEM;
+
+ elfhdr = (struct elfhdr *)readl(vaddr);
+ pr_info("Reading in kernel:elfhdr 0x%x:elfhdr->entry=0x%x\n",
+ (uint32_t)elfhdr, (uint32_t)elfhdr->e_entry);
+
+ pr_info("[%s] reset modem()...\n", __func__);
+ ret = reset_modem(elfhdr->e_entry);
+
+ iounmap(vaddr);
+
+ return ret;
+}
diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c
new file mode 100644
index 00000000000..160ca529261
--- /dev/null
+++ b/arch/arm/mach-ux500/tee_ux500.c
@@ -0,0 +1,95 @@
+/*
+ * TEE service to handle the calls to trusted applications.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/tee.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include <mach/hardware.h>
+
+#define ISSWAPI_EXECUTE_TA 0x11000001
+#define ISSWAPI_CLOSE_TA 0x11000002
+
+#define SEC_ROM_NO_FLAG_MASK 0x0000
+
+static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
+{
+ typedef u32 (*bridge_func)(u32, u32, va_list);
+ bridge_func hw_sec_rom_pub_bridge;
+ va_list ap;
+ u32 ret;
+
+ if (cpu_is_u8500v20_or_later())
+ hw_sec_rom_pub_bridge = (bridge_func)
+ ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));
+ else if (cpu_is_u5500())
+ hw_sec_rom_pub_bridge = (bridge_func)
+ ((u32)IO_ADDRESS(U5500_BOOT_ROM_BASE + 0x18300));
+ else
+ ux500_unknown_soc();
+
+ va_start(ap, cfg);
+ ret = hw_sec_rom_pub_bridge(service_id, cfg, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+int call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ /*
+ * ts->ta and ts->uuid are set to NULL when opening the device,
+ * hence it should be safe to just do the call here.
+ */
+
+ switch (sec_cmd) {
+ case TEED_INVOKE:
+ if (!ts->uuid) {
+ call_sec_rom_bridge(ISSWAPI_EXECUTE_TA,
+ SEC_ROM_NO_FLAG_MASK,
+ virt_to_phys(&ts->id),
+ NULL,
+ virt_to_phys(ts->ta),
+ ts->cmd,
+ virt_to_phys((void *)(ts->op)),
+ virt_to_phys((void *)(&ts->err)),
+ virt_to_phys((void *)(&ts->origin)));
+ } else {
+ call_sec_rom_bridge(ISSWAPI_EXECUTE_TA,
+ SEC_ROM_NO_FLAG_MASK,
+ virt_to_phys(&ts->id),
+ virt_to_phys(ts->uuid),
+ virt_to_phys(ts->ta),
+ ts->cmd,
+ virt_to_phys((void *)(ts->op)),
+ virt_to_phys((void *)(&ts->err)),
+ virt_to_phys((void *)(&ts->origin)));
+ }
+ break;
+
+ case TEED_CLOSE_SESSION:
+ call_sec_rom_bridge(ISSWAPI_CLOSE_TA,
+ SEC_ROM_NO_FLAG_MASK,
+ ts->id,
+ NULL,
+ virt_to_phys(ts->ta),
+ virt_to_phys((void *)(&ts->err)));
+
+ /* Since the TEE Client API does NOT take care of
+ * the return value, we print a warning here if
+ * something went wrong in secure world.
+ */
+ if (ts->err != TEED_SUCCESS)
+ pr_warning("[%s] failed in secure world\n",
+ __func__);
+
+ break;
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c
index aea467d04ff..052fa837981 100644
--- a/arch/arm/mach-ux500/timer.c
+++ b/arch/arm/mach-ux500/timer.c
@@ -14,6 +14,21 @@
#include <mach/setup.h>
#include <mach/hardware.h>
+#include <mach/context.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+static int mtu_context_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ if (event == CONTEXT_APE_RESTORE)
+ nmdk_clksrc_reset();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mtu_context_notifier = {
+ .notifier_call = mtu_context_notifier_call,
+};
+#endif
static void __init ux500_timer_init(void)
{
@@ -54,6 +69,10 @@ static void __init ux500_timer_init(void)
nmdk_timer_init();
clksrc_dbx500_prcmu_init(prcmu_timer_base);
+
+#ifdef CONFIG_UX500_CONTEXT
+ WARN_ON(context_ape_notifier_register(&mtu_context_notifier));
+#endif
}
static void ux500_timer_reset(void)
diff --git a/arch/arm/mach-ux500/uart-db8500.c b/arch/arm/mach-ux500/uart-db8500.c
new file mode 100644
index 00000000000..fad9b9a13df
--- /dev/null
+++ b/arch/arm/mach-ux500/uart-db8500.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/serial.h>
+#include <mach/setup.h>
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+
+static struct {
+ struct clk *uart_clk;
+ void __iomem *base;
+ /* dr */
+ /* rsr_err */
+ u32 dma_wm;
+ u32 timeout;
+ /* fr */
+ u32 lcrh_rx;
+ u32 ilpr;
+ u32 ibrd;
+ u32 fbrd;
+ u32 lcrh_tx;
+ u32 cr;
+ u32 ifls;
+ u32 imsc;
+ /* ris */
+ /* mis */
+ /* icr */
+ u32 dmacr;
+ u32 xfcr;
+ u32 xon1;
+ u32 xon2;
+ u32 xoff1;
+ u32 xoff2;
+ /* itcr */
+ /* itip */
+ /* itop */
+ /* tdr */
+ u32 abcr;
+ /* absr */
+ /* abfmt */
+ /* abdr */
+ /* abdfr */
+ /* abmr */
+ u32 abimsc;
+ /* abris */
+ /* abmis */
+ /* abicr */
+ /* id_product_h_xy */
+ /* id_provider */
+ /* periphid0 */
+ /* periphid1 */
+ /* periphid2 */
+ /* periphid3 */
+ /* pcellid0 */
+ /* pcellid1 */
+ /* pcellid2 */
+ /* pcellid3 */
+} context_uart;
+
+static void save_uart(void)
+{
+ void __iomem *membase;
+
+ membase = context_uart.base;
+
+ clk_enable(context_uart.uart_clk);
+
+ context_uart.dma_wm = readl_relaxed(membase + ST_UART011_DMAWM);
+ context_uart.timeout = readl_relaxed(membase + ST_UART011_TIMEOUT);
+ context_uart.lcrh_rx = readl_relaxed(membase + ST_UART011_LCRH_RX);
+ context_uart.ilpr = readl_relaxed(membase + UART01x_ILPR);
+ context_uart.ibrd = readl_relaxed(membase + UART011_IBRD);
+ context_uart.fbrd = readl_relaxed(membase + UART011_FBRD);
+ context_uart.lcrh_tx = readl_relaxed(membase + ST_UART011_LCRH_TX);
+ context_uart.cr = readl_relaxed(membase + UART011_CR);
+ context_uart.ifls = readl_relaxed(membase + UART011_IFLS);
+ context_uart.imsc = readl_relaxed(membase + UART011_IMSC);
+ context_uart.dmacr = readl_relaxed(membase + UART011_DMACR);
+ context_uart.xfcr = readl_relaxed(membase + ST_UART011_XFCR);
+ context_uart.xon1 = readl_relaxed(membase + ST_UART011_XON1);
+ context_uart.xon2 = readl_relaxed(membase + ST_UART011_XON2);
+ context_uart.xoff1 = readl_relaxed(membase + ST_UART011_XOFF1);
+ context_uart.xoff2 = readl_relaxed(membase + ST_UART011_XOFF2);
+ context_uart.abcr = readl_relaxed(membase + ST_UART011_ABCR);
+ context_uart.abimsc = readl_relaxed(membase + ST_UART011_ABIMSC);
+
+ clk_disable(context_uart.uart_clk);
+}
+
+static void restore_uart(void)
+{
+ int cnt;
+ int retries = 100;
+ unsigned int cr;
+ void __iomem *membase;
+ u16 dummy;
+ bool show_warn = false;
+
+ membase = context_uart.base;
+ clk_enable(context_uart.uart_clk);
+
+ writew_relaxed(context_uart.ifls, membase + UART011_IFLS);
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+
+ writew_relaxed(cr, membase + UART011_CR);
+ writew_relaxed(0, membase + UART011_FBRD);
+ writew_relaxed(1, membase + UART011_IBRD);
+ writew_relaxed(0, membase + ST_UART011_LCRH_RX);
+ if (context_uart.lcrh_tx != ST_UART011_LCRH_RX) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing the LCRH_TX register;
+ * to get this delay, read a read-only register 10 times.
+ */
+ for (i = 0; i < 10; ++i)
+ dummy = readw(membase + ST_UART011_LCRH_RX);
+ writew_relaxed(0, membase + ST_UART011_LCRH_TX);
+ }
+ writew(0, membase + UART01x_DR);
+ do {
+ if (!(readw(membase + UART01x_FR) & UART01x_FR_BUSY))
+ break;
+ cpu_relax();
+ } while (retries-- > 0);
+ if (retries < 0)
+ /*
+ * We can't print out a warning here since the uart is
+ * not fully restored. Do it later.
+ */
+ show_warn = true;
+
+ writel_relaxed(context_uart.dma_wm, membase + ST_UART011_DMAWM);
+ writel_relaxed(context_uart.timeout, membase + ST_UART011_TIMEOUT);
+ writel_relaxed(context_uart.lcrh_rx, membase + ST_UART011_LCRH_RX);
+ writel_relaxed(context_uart.ilpr, membase + UART01x_ILPR);
+ writel_relaxed(context_uart.ibrd, membase + UART011_IBRD);
+ writel_relaxed(context_uart.fbrd, membase + UART011_FBRD);
+ /*
+ * Wait 10 PCLKs before writing the LCRH_TX register;
+ * to get this delay, read a read-only register 10 - 3 = 7
+ * times, since there are already 3 register writes after
+ * ST_UART011_LCRH_RX.
+ */
+ for (cnt = 0; cnt < 7; cnt++)
+ dummy = readw(membase + ST_UART011_LCRH_RX);
+
+ writel_relaxed(context_uart.lcrh_tx, membase + ST_UART011_LCRH_TX);
+ writel_relaxed(context_uart.ifls, membase + UART011_IFLS);
+ writel_relaxed(context_uart.dmacr, membase + UART011_DMACR);
+ writel_relaxed(context_uart.xfcr, membase + ST_UART011_XFCR);
+ writel_relaxed(context_uart.xon1, membase + ST_UART011_XON1);
+ writel_relaxed(context_uart.xon2, membase + ST_UART011_XON2);
+ writel_relaxed(context_uart.xoff1, membase + ST_UART011_XOFF1);
+ writel_relaxed(context_uart.xoff2, membase + ST_UART011_XOFF2);
+ writel_relaxed(context_uart.abcr, membase + ST_UART011_ABCR);
+ writel_relaxed(context_uart.abimsc, membase + ST_UART011_ABIMSC);
+ writel_relaxed(context_uart.cr, membase + UART011_CR);
+ writel(context_uart.imsc, membase + UART011_IMSC);
+
+ clk_disable(context_uart.uart_clk);
+
+ if (show_warn)
+ pr_warning("%s:uart tx busy\n", __func__);
+}
+
+static int uart_context_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case CONTEXT_APE_SAVE:
+ save_uart();
+ break;
+
+ case CONTEXT_APE_RESTORE:
+ restore_uart();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block uart_context_notifier = {
+ .notifier_call = uart_context_notifier_call,
+};
+
+#define __UART_BASE(soc, x) soc##_UART##x##_BASE
+#define UART_BASE(soc, x) __UART_BASE(soc, x)
+
+static int __init uart_context_notifier_init(void)
+{
+ unsigned long base;
+ static const char clkname[] __initconst
+ = "uart" __stringify(CONFIG_UX500_DEBUG_UART);
+
+ if (cpu_is_u8500())
+ base = UART_BASE(U8500, CONFIG_UX500_DEBUG_UART);
+ else if (cpu_is_u5500())
+ base = UART_BASE(U5500, CONFIG_UX500_DEBUG_UART);
+ else
+ ux500_unknown_soc();
+
+ context_uart.base = ioremap(base, SZ_4K);
+ context_uart.uart_clk = clk_get_sys(clkname, NULL);
+
+ if (IS_ERR(context_uart.uart_clk)) {
+ pr_err("%s:unable to get clk-uart%d\n", __func__,
+ CONFIG_UX500_DEBUG_UART);
+ return -EINVAL;
+ }
+
+ return WARN_ON(context_ape_notifier_register(&uart_context_notifier));
+}
+arch_initcall(uart_context_notifier_init);
+#endif
diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c
index 0a01cbdfe06..1e7c0cc912e 100644
--- a/arch/arm/mach-ux500/usb.c
+++ b/arch/arm/mach-ux500/usb.c
@@ -10,6 +10,9 @@
#include <plat/ste_dma40.h>
#include <mach/hardware.h>
#include <mach/usb.h>
+#include <plat/pincfg.h>
+#include "pins.h"
+#include "board-ux500-usb.h"
#define MUSB_DMA40_RX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
@@ -31,6 +34,8 @@
.dst_info.psize = STEDMA40_PSIZE_LOG_16, \
}
+#define USB_OTG_GPIO_CS 76
+
static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
= {
MUSB_DMA40_RX_CH,
@@ -85,7 +90,12 @@ static struct ux500_musb_board_data musb_board_data = {
.dma_filter = stedma40_filter,
};
+#ifdef CONFIG_USB_UX500_DMA
static u64 ux500_musb_dmamask = DMA_BIT_MASK(32);
+#else
+static u64 ux500_musb_dmamask = DMA_BIT_MASK(0);
+#endif
+static struct ux500_pins *usb_gpio_pins;
static struct musb_hdrc_config musb_hdrc_config = {
.multipoint = true,
@@ -95,13 +105,7 @@ static struct musb_hdrc_config musb_hdrc_config = {
};
static struct musb_hdrc_platform_data musb_platform_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
.mode = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
- .mode = MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
- .mode = MUSB_HOST,
-#endif
.config = &musb_hdrc_config,
.board_data = &musb_board_data,
};
@@ -130,6 +134,38 @@ struct platform_device ux500_musb_device = {
.resource = usb_resources,
};
+static void enable_gpio(void)
+{
+ ux500_pins_enable(usb_gpio_pins);
+}
+static void disable_gpio(void)
+{
+ ux500_pins_disable(usb_gpio_pins);
+}
+static int get_gpio(struct device *device)
+{
+ usb_gpio_pins = ux500_pins_get(dev_name(device));
+
+ if (usb_gpio_pins == NULL) {
+ dev_err(device, "Could not get %s:usb_gpio_pins structure\n",
+ dev_name(device));
+
+ return -ENODEV;
+ }
+ return 0;
+}
+static void put_gpio(void)
+{
+ ux500_pins_put(usb_gpio_pins);
+}
+struct abx500_usbgpio_platform_data abx500_usbgpio_plat_data = {
+ .get = &get_gpio,
+ .enable = &enable_gpio,
+ .disable = &disable_gpio,
+ .put = &put_gpio,
+ .usb_cs = USB_OTG_GPIO_CS,
+};
+
static inline void ux500_usb_dma_update_rx_ch_config(int *src_dev_type)
{
u32 idx;
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 07201637109..226f8736152 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -240,6 +240,24 @@ ENTRY(fa_dma_unmap_area)
mov pc, lr
ENDPROC(fa_dma_unmap_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(fa_clean_dcache_all)
+ mov pc, lr
+ENDPROC(fa_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(fa_flush_dcache_all)
+ mov pc, lr
+ENDPROC(fa_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2301f22610..ab5bf508a2a 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -127,6 +127,24 @@ ENTRY(v3_dma_map_area)
ENDPROC(v3_dma_unmap_area)
ENDPROC(v3_dma_map_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v3_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v3_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v3_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v3_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index fd9bb7addc8..9d3a055127e 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -139,6 +139,24 @@ ENTRY(v4_dma_map_area)
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v4_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v4_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v4_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v4_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 4f2c14151cc..54d3cda4a89 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -251,6 +251,24 @@ ENTRY(v4wb_dma_unmap_area)
mov pc, lr
ENDPROC(v4wb_dma_unmap_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v4wb_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v4wb_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v4wb_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v4wb_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 4d7b467631c..40f7dba11f5 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -195,6 +195,24 @@ ENTRY(v4wt_dma_map_area)
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v4wt_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v4wt_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v4wt_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v4wt_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a33a4..b88dd4ab038 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -328,6 +328,24 @@ ENTRY(v6_dma_unmap_area)
mov pc, lr
ENDPROC(v6_dma_unmap_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v6_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v6_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v6_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v6_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 07c4bc8ea0a..3c77277d8cb 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -33,7 +33,7 @@ ENTRY(v7_flush_icache_all)
ENDPROC(v7_flush_icache_all)
/*
- * v7_flush_dcache_all()
+ * __v7_flush_dcache_all()
*
* Flush the whole D-cache.
*
@@ -41,7 +41,7 @@ ENDPROC(v7_flush_icache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7_flush_dcache_all)
+ENTRY(__v7_flush_dcache_all)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
@@ -88,9 +88,93 @@ finished:
dsb
isb
mov pc, lr
+ENDPROC(__v7_flush_dcache_all)
+
+/*
+ * __v7_clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+ENTRY(__v7_clean_dcache_all)
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq finished1 @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+loop21:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip1 @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+loop22:
+ mov r9, r4 @ create working copy of max way size
+loop23:
+ ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r9, r5 )
+ THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
+ ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r7, r2 )
+ THUMB( orr r11, r11, r6 ) @ factor index number into r11
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge loop23
+ subs r7, r7, #1 @ decrement the index
+ bge loop22
+skip1:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt loop21
+finished1:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ mov pc, lr
+ENDPROC(__v7_clean_dcache_all)
+
+/*
+ * v7_flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v7_flush_dcache_all)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ bl __v7_flush_dcache_all
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
ENDPROC(v7_flush_dcache_all)
/*
+ * v7_clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v7_clean_dcache_all)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ bl __v7_clean_dcache_all
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(v7_clean_dcache_all)
+
+/*
* v7_flush_cache_all()
*
* Flush the entire cache system.
@@ -102,14 +186,12 @@ ENDPROC(v7_flush_dcache_all)
*
*/
ENTRY(v7_flush_kern_cache_all)
- ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ stmfd sp!, {lr}
bl v7_flush_dcache_all
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
- ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ ldmfd sp!, {lr}
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550e6cb..3a516edde47 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -281,6 +281,20 @@ static struct mem_type mem_types[] = {
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+ /*
+ * NOTE: this is only a temporary hack. The U8500 ED/V1.0 cuts
+ * require such a memory type for deep sleep resume. This is
+ * expected to be solved in cut V2.0, at which point this will
+ * be cleaned up. See the commit message for more details.
+ */
+ [MT_BACKUP_RAM] = {
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
+ .domain = DOMAIN_IO,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 307a4def8d3..411610d4973 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -303,6 +303,8 @@ ENTRY(\name\()_cache_fns)
.long \name\()_coherent_kern_range
.long \name\()_coherent_user_range
.long \name\()_flush_kern_dcache_area
+ .long \name\()_clean_dcache_all
+ .long \name\()_flush_dcache_all
.long \name\()_dma_map_area
.long \name\()_dma_unmap_area
.long \name\()_dma_flush_range
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index e70a73731ea..32ece9e79a6 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -298,7 +298,7 @@ __v7_ca15mp_setup:
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
- bl v7_flush_dcache_all
+ bl __v7_flush_dcache_all
ldmia r12, {r0-r5, r7, r9, r11, lr}
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
diff --git a/arch/arm/plat-nomadik/include/plat/mtu.h b/arch/arm/plat-nomadik/include/plat/mtu.h
index 6508e7694a4..5c97b3ccf76 100644
--- a/arch/arm/plat-nomadik/include/plat/mtu.h
+++ b/arch/arm/plat-nomadik/include/plat/mtu.h
@@ -7,5 +7,7 @@ extern void __iomem *mtu_base;
void nmdk_clkevt_reset(void);
void nmdk_clksrc_reset(void);
+struct clock_event_device *nmdk_clkevt_get(void);
+
#endif /* __PLAT_MTU_H */
diff --git a/arch/arm/plat-nomadik/include/plat/pincfg.h b/arch/arm/plat-nomadik/include/plat/pincfg.h
index 22cb97d2d8a..c015133a7ad 100644
--- a/arch/arm/plat-nomadik/include/plat/pincfg.h
+++ b/arch/arm/plat-nomadik/include/plat/pincfg.h
@@ -24,6 +24,7 @@
* bit 16..18 - SLPM pull up/down state
* bit 19..20 - SLPM direction
* bit 21..22 - SLPM Value (if output)
+ * bit 23..24 - SLPM PDIS value
+ * bit 25 - LOWEMI value
*
* to facilitate the definition, the following macros are provided
*
@@ -67,6 +68,10 @@ typedef unsigned long pin_cfg_t;
/* These two replace the above in DB8500v2+ */
#define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT)
#define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP PIN_SLPM_WAKEUP_DISABLE
+
+#define PIN_SLPM_GPIO PIN_SLPM_WAKEUP_ENABLE /* In SLPM, pin is a gpio */
+#define PIN_SLPM_ALTFUNC PIN_SLPM_WAKEUP_DISABLE /* In SLPM, pin is altfunc */
#define PIN_DIR_SHIFT 14
#define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT)
@@ -105,6 +110,20 @@ typedef unsigned long pin_cfg_t;
#define PIN_SLPM_VAL_LOW ((1 + 0) << PIN_SLPM_VAL_SHIFT)
#define PIN_SLPM_VAL_HIGH ((1 + 1) << PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_PDIS_SHIFT 23
+#define PIN_SLPM_PDIS_MASK (0x3 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS(x) \
+ (((x) & PIN_SLPM_PDIS_MASK) >> PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_NO_CHANGE (0 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_DISABLED (1 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_ENABLED (2 << PIN_SLPM_PDIS_SHIFT)
+
+#define PIN_LOWEMI_SHIFT 25
+#define PIN_LOWEMI_MASK (0x1 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI(x) (((x) & PIN_LOWEMI_MASK) >> PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_DISABLED (0 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_ENABLED (1 << PIN_LOWEMI_SHIFT)
+
/* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */
#define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN)
#define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP)
diff --git a/arch/arm/plat-nomadik/include/plat/ske.h b/arch/arm/plat-nomadik/include/plat/ske.h
index 31382fbc07d..7a4fbdf3c13 100644
--- a/arch/arm/plat-nomadik/include/plat/ske.h
+++ b/arch/arm/plat-nomadik/include/plat/ske.h
@@ -22,6 +22,9 @@
#define SKE_MIS 0x18
#define SKE_ICR 0x1C
+#define SKE_KPD_MAX_ROWS 8
+#define SKE_KPD_MAX_COLS 8
+
/*
* Keypad module
*/
@@ -30,21 +33,27 @@
* struct keypad_platform_data - structure for platform specific data
* @init: pointer to keypad init function
* @exit: pointer to keypad deinitialisation function
+ * @gpio_input_pins: pointer to gpio input pins
+ * @gpio_output_pins: pointer to gpio output pins
* @keymap_data: matrix scan code table for keycodes
* @krow: maximum number of rows
* @kcol: maximum number of columns
* @debounce_ms: platform specific debounce time
* @no_autorepeat: flag for auto repetition
* @wakeup_enable: allow waking up the system
+ * @switch_delay: gpio switch_delay
*/
struct ske_keypad_platform_data {
int (*init)(void);
int (*exit)(void);
+ int *gpio_input_pins;
+ int *gpio_output_pins;
const struct matrix_keymap_data *keymap_data;
u8 krow;
u8 kcol;
u8 debounce_ms;
bool no_autorepeat;
bool wakeup_enable;
+ int switch_delay;
};
#endif /*__SKE_KPD_H*/
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 685c78716d9..c7df218bb39 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -113,7 +113,8 @@ struct stedma40_half_channel_info {
* @dst_dev_type: Dst device type
 * @src_info: Parameters for src half channel
* @dst_info: Parameters for dst half channel
- *
+ * @use_fixed_channel: if true, use physical channel specified by phy_channel
+ * @phy_channel: physical channel to use, only if use_fixed_channel is true
*
* This structure has to be filled by the client drivers.
* It is recommended to do all dma configurations for clients in the machine.
@@ -129,6 +130,9 @@ struct stedma40_chan_cfg {
int dst_dev_type;
struct stedma40_half_channel_info src_info;
struct stedma40_half_channel_info dst_info;
+
+ bool use_fixed_channel;
+ int phy_channel;
};
/**
@@ -142,6 +146,7 @@ struct stedma40_chan_cfg {
* @memcpy_conf_phy: default configuration of physical channel memcpy
* @memcpy_conf_log: default configuration of logical channel memcpy
 * @disabled_channels: A vector, ending with -1, that marks physical channels
 * that are for different reasons not available for the driver.
+ * @use_esram_lcla: flag for mapping the lcla into the esram region
*/
struct stedma40_platform_data {
@@ -153,6 +158,7 @@ struct stedma40_platform_data {
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
int disabled_channels[STEDMA40_MAX_PHYS];
+ bool use_esram_lcla;
};
#ifdef CONFIG_STE_DMA40
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index 30b6433d910..7ca131e57df 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
@@ -163,6 +164,28 @@ static struct clock_event_device nmdk_clkevt = {
.set_next_event = nmdk_clkevt_next,
};
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+static void nmdk_timer_delay_loop(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ bclock = ~readl(mtu_base + MTU_VAL(0));
+ do {
+ now = ~readl(mtu_base + MTU_VAL(0));
+ /* If the timer has been cleared (suspend) or has wrapped, exit */
+ if (unlikely(now < bclock))
+ return;
+ } while ((now - bclock) < loops);
+}
+
+/* Used to calibrate the delay */
+int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = ~readl(mtu_base + MTU_VAL(0));
+ return 0;
+}
+#endif
+
/*
* IRQ Handler for timer 1 of the MTU block.
*/
@@ -195,6 +218,11 @@ void nmdk_clksrc_reset(void)
mtu_base + MTU_CR(0));
}
+struct clock_event_device *nmdk_clkevt_get(void)
+{
+ return &nmdk_clkevt;
+}
+
void __init nmdk_timer_init(void)
{
unsigned long rate;
@@ -247,4 +275,8 @@ void __init nmdk_timer_init(void)
/* Register irq and clockevents */
setup_irq(IRQ_MTU0, &nmdk_timer_irq);
clockevents_register_device(&nmdk_clkevt);
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+ set_delay_fn(nmdk_timer_delay_loop);
+#endif
+
}
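As an aside, read_current_timer() above inverts the MTU down-counter so that callers see a monotonically increasing tick value. A hypothetical caller (do_something() is a placeholder, not part of the patch) could time a code section in timer ticks like this:

/* Illustrative sketch only: measure elapsed MTU ticks around a section. */
unsigned long start, end, elapsed;

read_current_timer(&start);
do_something();			/* placeholder for the code being timed */
read_current_timer(&end);
elapsed = end - start;		/* unsigned arithmetic handles wrap-around */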
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b5e6f243f74..ea4954fd633 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -124,6 +124,8 @@ source "drivers/platform/Kconfig"
source "drivers/clk/Kconfig"
+source "drivers/tee/Kconfig"
+
source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
@@ -137,3 +139,4 @@ source "drivers/hv/Kconfig"
source "drivers/devfreq/Kconfig"
endmenu
+
diff --git a/drivers/Makefile b/drivers/Makefile
index 1b3142127bf..c8dfb8a82d2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -122,6 +122,7 @@ obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/
+obj-$(CONFIG_TEE_SUPPORT) += tee/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e80113..b5b24989d28 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -109,7 +109,7 @@ static int amba_legacy_resume(struct device *dev)
return ret;
}
-static int amba_pm_prepare(struct device *dev)
+int amba_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -120,7 +120,7 @@ static int amba_pm_prepare(struct device *dev)
return ret;
}
-static void amba_pm_complete(struct device *dev)
+void amba_pm_complete(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -137,7 +137,7 @@ static void amba_pm_complete(struct device *dev)
#ifdef CONFIG_SUSPEND
-static int amba_pm_suspend(struct device *dev)
+int amba_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -155,7 +155,7 @@ static int amba_pm_suspend(struct device *dev)
return ret;
}
-static int amba_pm_suspend_noirq(struct device *dev)
+int amba_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -171,7 +171,7 @@ static int amba_pm_suspend_noirq(struct device *dev)
return ret;
}
-static int amba_pm_resume(struct device *dev)
+int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -189,7 +189,7 @@ static int amba_pm_resume(struct device *dev)
return ret;
}
-static int amba_pm_resume_noirq(struct device *dev)
+int amba_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -216,7 +216,7 @@ static int amba_pm_resume_noirq(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-static int amba_pm_freeze(struct device *dev)
+int amba_pm_freeze(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -234,7 +234,7 @@ static int amba_pm_freeze(struct device *dev)
return ret;
}
-static int amba_pm_freeze_noirq(struct device *dev)
+int amba_pm_freeze_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -250,7 +250,7 @@ static int amba_pm_freeze_noirq(struct device *dev)
return ret;
}
-static int amba_pm_thaw(struct device *dev)
+int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -268,7 +268,7 @@ static int amba_pm_thaw(struct device *dev)
return ret;
}
-static int amba_pm_thaw_noirq(struct device *dev)
+int amba_pm_thaw_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -284,7 +284,7 @@ static int amba_pm_thaw_noirq(struct device *dev)
return ret;
}
-static int amba_pm_poweroff(struct device *dev)
+int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -302,7 +302,7 @@ static int amba_pm_poweroff(struct device *dev)
return ret;
}
-static int amba_pm_poweroff_noirq(struct device *dev)
+int amba_pm_poweroff_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -318,7 +318,7 @@ static int amba_pm_poweroff_noirq(struct device *dev)
return ret;
}
-static int amba_pm_restore(struct device *dev)
+int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -336,7 +336,7 @@ static int amba_pm_restore(struct device *dev)
return ret;
}
-static int amba_pm_restore_noirq(struct device *dev)
+int amba_pm_restore_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -402,20 +402,7 @@ static int amba_pm_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
- .prepare = amba_pm_prepare,
- .complete = amba_pm_complete,
- .suspend = amba_pm_suspend,
- .resume = amba_pm_resume,
- .freeze = amba_pm_freeze,
- .thaw = amba_pm_thaw,
- .poweroff = amba_pm_poweroff,
- .restore = amba_pm_restore,
- .suspend_noirq = amba_pm_suspend_noirq,
- .resume_noirq = amba_pm_resume_noirq,
- .freeze_noirq = amba_pm_freeze_noirq,
- .thaw_noirq = amba_pm_thaw_noirq,
- .poweroff_noirq = amba_pm_poweroff_noirq,
- .restore_noirq = amba_pm_restore_noirq,
+ USE_AMBA_PM_SLEEP_OPS
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 21cf46f4524..95f10c51f23 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -172,6 +172,9 @@ config SYS_HYPERVISOR
bool
default n
+config SYS_SOC
+ bool
+
source "drivers/base/regmap/Kconfig"
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 99a375ad2cc..442251cf83e 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -17,6 +17,7 @@ ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_SYS_SOC) += soc.o
obj-$(CONFIG_REGMAP) += regmap/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 00000000000..046b43bfcdb
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin-nonst@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+struct kobject *soc_object;
+
+ssize_t show_soc_info(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct sysfs_soc_info *si = container_of(attr,
+ struct sysfs_soc_info, attr);
+
+ if (si->info)
+ return sprintf(buf, "%s\n", si->info);
+
+ return si->get_info(buf, si);
+}
+
+int __init register_sysfs_soc_info(struct sysfs_soc_info *info, int nb_info)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < nb_info; i++) {
+ ret = sysfs_create_file(soc_object, &info[i].attr.attr);
+ if (ret) {
+ for (i -= 1; i >= 0; i--)
+ sysfs_remove_file(soc_object, &info[i].attr.attr);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct attribute *soc_attrs[] = {
+ NULL,
+};
+
+static struct attribute_group soc_attr_group = {
+ .attrs = soc_attrs,
+};
+
+int __init register_sysfs_soc(struct sysfs_soc_info *info, size_t num)
+{
+ int ret;
+
+ soc_object = kobject_create_and_add("socinfo", NULL);
+ if (!soc_object) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = sysfs_create_group(soc_object, &soc_attr_group);
+ if (ret)
+ goto kset_exit;
+
+ ret = register_sysfs_soc_info(info, num);
+ if (ret)
+ goto group_exit;
+
+ return 0;
+
+group_exit:
+ sysfs_remove_group(soc_object, &soc_attr_group);
+kset_exit:
+ kobject_put(soc_object);
+exit:
+ return ret;
+}
+
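A hypothetical usage sketch of the interface above (the .attr and .info field names are taken from show_soc_info(); the values and example_* names are made up): platform code fills a table of sysfs_soc_info entries and registers them once at boot.

/* Illustrative only; the real tables live in the mach-ux500 cpu code. */
static struct sysfs_soc_info example_soc_info[] = {
	{
		.attr = __ATTR(family, S_IRUGO, show_soc_info, NULL),
		.info = "Ux500",
	},
};

static int __init example_soc_init(void)
{
	return register_sysfs_soc(example_soc_info,
				  ARRAY_SIZE(example_soc_info));
}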
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 59feefe0e3e..dcfb5666a26 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -14,6 +14,9 @@
*/
#include <linux/clockchips.h>
#include <linux/clksrc-dbx500-prcmu.h>
+#ifdef CONFIG_BOOTTIME
+#include <linux/boottime.h>
+#endif
#include <asm/sched_clock.h>
@@ -79,6 +82,23 @@ static void notrace clksrc_dbx500_prcmu_update_sched_clock(void)
}
#endif
+#ifdef CONFIG_BOOTTIME
+static unsigned long __init boottime_get_time(void)
+{
+ return div_s64(clocksource_cyc2ns(clocksource_dbx500_prcmu.read(
+ &clocksource_dbx500_prcmu),
+ clocksource_dbx500_prcmu.mult,
+ clocksource_dbx500_prcmu.shift),
+ 1000);
+}
+
+static struct boottime_timer __initdata boottime_timer = {
+ .init = NULL,
+ .get_time = boottime_get_time,
+ .finalize = NULL,
+};
+#endif
+
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
{
clksrc_dbx500_timer_base = base;
@@ -103,4 +123,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
RATE_32K, SCHED_CLOCK_MIN_WRAP);
clocksource_register(&clocksource_dbx500_prcmu);
+#ifdef CONFIG_BOOTTIME
+ boottime_activate(&boottime_timer);
+#endif
}
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index a48bc02cd76..2988ff59bdb 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB5500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 987a165ede2..d0945dbb29c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -368,6 +368,27 @@ show_one(scaling_cur_freq, cur);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max)
+{
+ int ret;
+ struct cpufreq_policy new_policy;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return -EINVAL;
+
+ ret = cpufreq_get_policy(&new_policy, cpu);
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return -EINVAL;
+ }
+
+ new_policy.min = min;
+ new_policy.max = max;
+
+ ret = __cpufreq_set_policy(policy, &new_policy);
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_update_freq);
+
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
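A hypothetical caller of the new export above: clamp CPU0 between two frequencies, with values in kHz as is conventional for cpufreq. This is only a sketch; the real callers are in the ux500 PRCMU debug/QoS code.

/* Illustrative only. */
int err = cpufreq_update_freq(0, 400000, 800000);	/* 400 MHz .. 800 MHz */
if (err)
	pr_err("failed to update cpufreq limits: %d\n", err);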
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
deleted file mode 100644
index f5002015d82..00000000000
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include <mach/id.h>
-
-static struct cpufreq_frequency_table freq_table[] = {
- [0] = {
- .index = 0,
- .frequency = 200000,
- },
- [1] = {
- .index = 1,
- .frequency = 300000,
- },
- [2] = {
- .index = 2,
- .frequency = 600000,
- },
- [3] = {
- /* Used for MAX_OPP, if available */
- .index = 3,
- .frequency = CPUFREQ_TABLE_END,
- },
- [4] = {
- .index = 4,
- .frequency = CPUFREQ_TABLE_END,
- },
-};
-
-static enum arm_opp idx2opp[] = {
- ARM_EXTCLK,
- ARM_50_OPP,
- ARM_100_OPP,
- ARM_MAX_OPP
-};
-
-static struct freq_attr *db8500_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_freqs freqs;
- unsigned int idx;
-
- /* scale the target frequency to one of the extremes supported */
- if (target_freq < policy->cpuinfo.min_freq)
- target_freq = policy->cpuinfo.min_freq;
- if (target_freq > policy->cpuinfo.max_freq)
- target_freq = policy->cpuinfo.max_freq;
-
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
- return -EINVAL;
- }
-
- freqs.old = policy->cur;
- freqs.new = freq_table[idx].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- /* pre-change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- /* request the PRCM unit for opp change */
- if (prcmu_set_arm_opp(idx2opp[idx])) {
- pr_err("db8500-cpufreq: Failed to set OPP level\n");
- return -EINVAL;
- }
-
- /* post change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return 0;
-}
-
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
-{
- int i;
- /* request the prcm to get the current ARM opp */
- for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
- ;
- return freq_table[i].frequency;
-}
-
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
-{
- int i, res;
-
- BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
-
- if (!prcmu_is_u8400()) {
- freq_table[1].frequency = 400000;
- freq_table[2].frequency = 800000;
- if (prcmu_has_arm_maxopp())
- freq_table[3].frequency = 1000000;
- }
- pr_info("db8500-cpufreq : Available frequencies:\n");
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
-
- /* get policy fields based on the table */
- res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!res)
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- return res;
- }
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /*
- * FIXME : Need to take time measurement across the target()
- * function with no/some/all drivers in the notification
- * list.
- */
- policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
- /* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, &cpu_present_map);
-
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-
- return 0;
-}
-
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
-};
-
-static int __init db8500_cpufreq_register(void)
-{
- if (!cpu_is_u8500v20_or_later())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return cpufreq_register_driver(&db8500_cpufreq_driver);
-}
-device_initcall(db8500_cpufreq_register);
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
new file mode 100644
index 00000000000..1f9249f869a
--- /dev/null
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer
+ * Author: Martin Persson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/id.h>
+
+static struct cpufreq_frequency_table db8500_freq_table[] = {
+ [0] = {
+ .index = 0,
+ .frequency = 200000,
+ },
+ [1] = {
+ .index = 1,
+ .frequency = 300000,
+ },
+ [2] = {
+ .index = 2,
+ .frequency = 600000,
+ },
+ [3] = {
+ /* Used for MAX_OPP, if available */
+ .index = 3,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+ [4] = {
+ .index = 4,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
+
+static struct cpufreq_frequency_table db5500_freq_table[] = {
+ [0] = {
+ .index = 0,
+ .frequency = 200000,
+ },
+ [1] = {
+ .index = 1,
+ .frequency = 396500,
+ },
+ [2] = {
+ .index = 2,
+ .frequency = 793000,
+ },
+ [3] = {
+ .index = 3,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
+
+static struct cpufreq_frequency_table *freq_table;
+
+static enum arm_opp db8500_idx2opp[] = {
+ ARM_EXTCLK,
+ ARM_50_OPP,
+ ARM_100_OPP,
+ ARM_MAX_OPP
+};
+
+static enum arm_opp db5500_idx2opp[] = {
+ ARM_EXTCLK,
+ ARM_50_OPP,
+ ARM_100_OPP,
+};
+
+static enum arm_opp *idx2opp;
+
+static struct freq_attr *dbx500_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int idx;
+
+ /* scale the target frequency to one of the extremes supported */
+ if (target_freq < policy->cpuinfo.min_freq)
+ target_freq = policy->cpuinfo.min_freq;
+ if (target_freq > policy->cpuinfo.max_freq)
+ target_freq = policy->cpuinfo.max_freq;
+
+ /* Lookup the next frequency */
+ if (cpufreq_frequency_table_target
+ (policy, freq_table, target_freq, relation, &idx)) {
+ return -EINVAL;
+ }
+
+ freqs.old = policy->cur;
+ freqs.new = freq_table[idx].frequency;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ /* pre-change notification */
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* request the PRCM unit for opp change */
+ if (prcmu_set_arm_opp(idx2opp[idx])) {
+ pr_err("ux500-cpufreq: Failed to set OPP level\n");
+ return -EINVAL;
+ }
+
+ /* post change notification */
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
+{
+ int i;
+ /* request the prcm to get the current ARM opp */
+ for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
+ ;
+ return freq_table[i].frequency;
+}
+
+static bool initialized;
+
+static void __init dbx500_cpufreq_early_init(void)
+{
+ if (cpu_is_u5500()) {
+ freq_table = db5500_freq_table;
+ idx2opp = db5500_idx2opp;
+
+ } else if (cpu_is_u8500()) {
+ freq_table = db8500_freq_table;
+ idx2opp = db8500_idx2opp;
+
+ if (!prcmu_is_u8400()) {
+ freq_table[1].frequency = 400000;
+ freq_table[2].frequency = 800000;
+ if (prcmu_has_arm_maxopp())
+ freq_table[3].frequency = 1000000;
+ }
+
+ } else {
+ ux500_unknown_soc();
+ }
+
+ initialized = true;
+}
+
+/*
+ * This is called from localtimer initialization, via the clk_get_rate() for
+ * the smp_twd clock. This is way before cpufreq is initialized.
+ */
+unsigned long dbx500_cpufreq_getfreq(void)
+{
+ if (!initialized)
+ dbx500_cpufreq_early_init();
+
+ return dbx500_cpufreq_getspeed(0) * 1000;
+}
+
+int dbx500_cpufreq_get_limits(int cpu, int r,
+ unsigned int *min, unsigned int *max)
+{
+ int op;
+ int i;
+ int ret;
+ static int old_freq;
+ struct cpufreq_policy p;
+
+ switch (r) {
+ case 0:
+ /* Fall through */
+ case 25:
+ op = ARM_EXTCLK;
+ break;
+ case 50:
+ op = ARM_50_OPP;
+ break;
+ case 100:
+ op = ARM_100_OPP;
+ break;
+ case 125:
+ if (cpu_is_u8500() && prcmu_has_arm_maxopp())
+ op = ARM_MAX_OPP;
+ else
+ op = ARM_100_OPP;
+ break;
+ default:
+ pr_err("cpufreq-dbx500: Incorrect arm target value (%d).\n",
+ r);
+ BUG();
+ break;
+ }
+
+ for (i = 0; idx2opp[i] != op; i++)
+ ;
+
+ if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
+ pr_err("cpufreq-dbx500: Minimum frequency does not exist!\n");
+ BUG();
+ }
+
+ if (freq_table[i].frequency != old_freq)
+ pr_debug("cpufreq-dbx500: set min arm freq to %d\n",
+ freq_table[i].frequency);
+
+ (*min) = freq_table[i].frequency;
+
+ ret = cpufreq_get_policy(&p, cpu);
+ if (ret) {
+ pr_err("cpufreq-dbx500: Failed to get policy.\n");
+ return -EINVAL;
+ }
+
+ (*max) = p.max;
+ return 0;
+}
+
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int res;
+ int i;
+
+ /* get policy fields based on the table */
+ res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!res)
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ else {
+ pr_err("dbx500-cpufreq : Failed to read policy table\n");
+ return res;
+ }
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
+
+ for (i = 0; freq_table[i].frequency != policy->cur; i++)
+ ;
+
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+ /*
+ * FIXME : Need to take time measurement across the target()
+ * function with no/some/all drivers in the notification
+ * list.
+ */
+ policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
+
+ /* policy sharing between dual CPUs */
+ cpumask_copy(policy->cpus, &cpu_present_map);
+
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+
+ return 0;
+}
+
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
+};
+
+static int __init dbx500_cpufreq_register(void)
+{
+ int i;
+
+ if (cpu_is_u5500() && cpu_is_u5500v1())
+ return -ENODEV;
+
+ if (cpu_is_u8500() && !cpu_is_u8500v20_or_later())
+ return -ENODEV;
+
+ if (!initialized)
+ dbx500_cpufreq_early_init();
+
+ pr_info("dbx500-cpufreq : Available frequencies:\n");
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ pr_info(" %d MHz\n", freq_table[i].frequency / 1000);
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
+}
+device_initcall(dbx500_cpufreq_register);
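Taken together, dbx500_cpufreq_get_limits() and the cpufreq_update_freq() helper added earlier in this patch suggest the following call pattern for platform QoS code. The function below is an editor's sketch only; its name and the idea of a single "requirement" caller are assumptions, while the two callees and their argument meanings come from the definitions above.

    /* Hypothetical sketch: translate an ARM OPP requirement (25, 50, 100
     * or 125 per cent) into kHz limits and apply them to the given CPU. */
    static int apply_arm_opp_requirement(int cpu, int requirement)
    {
            unsigned int min, max;
            int ret;

            ret = dbx500_cpufreq_get_limits(cpu, requirement, &min, &max);
            if (ret)
                    return ret;

            return cpufreq_update_freq(cpu, min, max);
    }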
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 6d16b4b0d7a..12edcef976a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -293,4 +293,15 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.
+config CRYPTO_DEV_UX500
+ tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
+ depends on ARCH_U8500
+ select CRYPTO_ALGAPI
+ help
+ Driver for ST-Ericsson UX500 crypto engine.
+
+if CRYPTO_DEV_UX500
+ source "drivers/crypto/ux500/Kconfig"
+endif # if CRYPTO_DEV_UX500
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea5015531..dbcc3113205 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
new file mode 100644
index 00000000000..165a03d46c0
--- /dev/null
+++ b/drivers/crypto/ux500/Kconfig
@@ -0,0 +1,29 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+config CRYPTO_DEV_UX500_CRYP
+ tristate "UX500 crypto driver for CRYP block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_DES
+ help
+ This is the driver for the crypto block CRYP.
+
+config CRYPTO_DEV_UX500_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
+ select CRYPTO_HMAC
+ help
+ This selects the UX500 hash driver for the HASH hardware.
+ Depends on U8500/STM DMA if running in DMA mode.
+
+config CRYPTO_DEV_UX500_DEBUG
+ bool "Activate ux500 platform debug-mode for crypto and hash block"
+ depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
+ default n
+ help
+ Say Y if you want to add debug prints to ux500_hash and
+ ux500_cryp devices.
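For reference, a board defconfig would need roughly the fragment below to build the crypto support introduced above. Only symbols defined in this patch are listed; the built-in/module split is an illustrative assumption, not mandated by the patch.

    CONFIG_CRYPTO_DEV_UX500=y
    CONFIG_CRYPTO_DEV_UX500_CRYP=y
    CONFIG_CRYPTO_DEV_UX500_HASH=y
    # CONFIG_CRYPTO_DEV_UX500_DEBUG is not set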
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
new file mode 100644
index 00000000000..b9a365bade8
--- /dev/null
+++ b/drivers/crypto/ux500/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
new file mode 100644
index 00000000000..e5d362a6f68
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -0,0 +1,13 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: shujuan.chen@stericsson.com for ST-Ericsson.
+# License terms: GNU General Public License (GPL) version 2
+
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_cryp_core.o := -DDEBUG -O0
+CFLAGS_cryp.o := -DDEBUG -O0
+CFLAGS_cryp_irq.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
+ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
new file mode 100644
index 00000000000..211200fed34
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -0,0 +1,418 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+/**
+ * cryp_wait_until_done - wait until the device logic is not busy
+ */
+void cryp_wait_until_done(struct cryp_device_data *device_data)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+}
+
+/**
+ * cryp_check - This routine checks Peripheral and PCell Id
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_check(struct cryp_device_data *device_data)
+{
+ int peripheralID2 = 0;
+
+ if (NULL == device_data)
+ return -EINVAL;
+
+ if (cpu_is_u8500())
+ peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500;
+ else if (cpu_is_u5500())
+ peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500;
+
+ /* Check Peripheral and Pcell Id Register for CRYP */
+ if ((CRYP_PERIPHERAL_ID0 ==
+ readl_relaxed(&device_data->base->periphId0))
+ && (CRYP_PERIPHERAL_ID1 ==
+ readl_relaxed(&device_data->base->periphId1))
+ && (peripheralID2 ==
+ readl_relaxed(&device_data->base->periphId2))
+ && (CRYP_PERIPHERAL_ID3 ==
+ readl_relaxed(&device_data->base->periphId3))
+ && (CRYP_PCELL_ID0 ==
+ readl_relaxed(&device_data->base->pcellId0))
+ && (CRYP_PCELL_ID1 ==
+ readl_relaxed(&device_data->base->pcellId1))
+ && (CRYP_PCELL_ID2 ==
+ readl_relaxed(&device_data->base->pcellId2))
+ && (CRYP_PCELL_ID3 ==
+ readl_relaxed(&device_data->base->pcellId3))) {
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+/**
+ * cryp_activity - This routine enables/disables the cryptography function.
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_crypen: Enable/Disable functionality
+ */
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen)
+{
+ CRYP_PUT_BITS(&device_data->base->cr,
+ cryp_crypen,
+ CRYP_CR_CRYPEN_POS,
+ CRYP_CR_CRYPEN_MASK);
+}
+
+/**
+ * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
+ * @device_data: Pointer to the device data struct for base address.
+ */
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
+{
+ /*
+ * We always need to disable the hardware before trying to flush the
+ * FIFO. This is something that isn't written in the design
+ * specification, but we have been informed by the hardware designers
+ * that this must be done.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
+ /*
+ * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
+ * register when starting a new calculation, which means Input FIFO is
+ * not full and input FIFO is empty.
+ */
+ while (readl_relaxed(&device_data->base->sr) !=
+ CRYP_SR_INFIFO_READY_MASK)
+ cpu_relax();
+}
+
+/**
+ * cryp_set_configuration - This routine sets the control register of the CRYP IP
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_config: Pointer to the configuration parameter
+ * @control_register: The control register to be written later on.
+ */
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register)
+{
+ u32 cr_for_kse;
+
+ if (NULL == device_data || NULL == cryp_config)
+ return -EINVAL;
+
+ *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
+
+ /* Prepare key for decryption in AES_ECB and AES_CBC mode. */
+ if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
+ ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
+ (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
+ cr_for_kse = *control_register;
+ /*
+ * This seems a bit odd, but it is indeed needed to set this to
+ * encrypt even though it is a decryption that we are doing. It is
+ * also mentioned in the design spec that you need to do this.
+ * After the key preparation for decryption is done you should set
+ * algodir back to decryption, which is done outside this if
+ * statement.
+ *
+ * According to the design specification we should set mode ECB
+ * during key preparation even though we might be running CBC
+ * when entering this function.
+ *
+ * Writing to KSE_ENABLED will drop CRYPEN when key preparation
+ * is done. Therefore we need to set CRYPEN again outside this
+ * if statement when running decryption.
+ */
+ cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
+ (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
+ (KSE_ENABLED << CRYP_CR_KSE_POS));
+
+ writel_relaxed(cr_for_kse, &device_data->base->cr);
+ cryp_wait_until_done(device_data);
+ }
+
+ *control_register |=
+ ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
+ (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
+
+ return 0;
+}
+
+/**
+ * cryp_configure_protection - set the protection bits in the CRYP logic.
+ * @device_data: Pointer to the device data struct for base address.
+ * @p_protect_config: Pointer to the protection mode and
+ * secure mode configuration
+ */
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config)
+{
+ if (NULL == p_protect_config)
+ return -EINVAL;
+
+ CRYP_WRITE_BIT(&device_data->base->cr,
+ (u32) p_protect_config->secure_access,
+ CRYP_CR_SECURE_MASK);
+ CRYP_PUT_BITS(&device_data->base->cr,
+ p_protect_config->privilege_access,
+ CRYP_CR_PRLG_POS,
+ CRYP_CR_PRLG_MASK);
+
+ return 0;
+}
+
+/**
+ * cryp_is_logic_busy - returns the busy status of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_is_logic_busy(struct cryp_device_data *device_data)
+{
+ return CRYP_TEST_BITS(&device_data->base->sr,
+ CRYP_SR_BUSY_MASK);
+}
+
+/**
+ * cryp_configure_for_dma - configures the CRYP IP for DMA operation
+ * @device_data: Pointer to the device data struct for base address.
+ * @dma_req: Specifies the DMA request type value.
+ */
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req)
+{
+ CRYP_SET_BITS(&device_data->base->dmacr,
+ (u32) dma_req);
+}
+
+/**
+ * cryp_configure_key_values - configures the key values for CRYP operations
+ * @device_data: Pointer to the device data struct for base address.
+ * @key_reg_index: Key value index register
+ * @key_value: The key value struct
+ */
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (key_reg_index) {
+ case CRYP_KEY_REG_1:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_1_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_1_r);
+ break;
+ case CRYP_KEY_REG_2:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_2_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_2_r);
+ break;
+ case CRYP_KEY_REG_3:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_3_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_3_r);
+ break;
+ case CRYP_KEY_REG_4:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_4_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_4_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+
+}
+
+/**
+ * cryp_configure_init_vector - configures the initialization vector register
+ * @device_data: Pointer to the device data struct for base address.
+ * @init_vector_index: Specifies the index of the init vector.
+ * @init_vector_value: Specifies the value for the init vector.
+ */
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (init_vector_index) {
+ case CRYP_INIT_VECTOR_INDEX_0:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_0_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_0_r);
+ break;
+ case CRYP_INIT_VECTOR_INDEX_1:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_1_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_1_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cryp_save_device_context - Store hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ * @cryp_mode: Mode: Polling, Interrupt or DMA
+ */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode)
+{
+ enum cryp_algo_mode algomode;
+ struct cryp_register *src_reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Always start by disabling the hardware and wait for it to finish the
+ * ongoing calculations before trying to reprogram it.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
+
+ if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
+ ctx->din = readl_relaxed(&src_reg->din);
+
+ ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
+
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
+ ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
+ ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
+ ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
+
+ default:
+ ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
+ ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
+ }
+
+ /* Save IV for CBC mode for both AES and DES. */
+ algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
+ if (algomode == CRYP_ALGO_TDES_CBC ||
+ algomode == CRYP_ALGO_DES_CBC ||
+ algomode == CRYP_ALGO_AES_CBC) {
+ ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
+ ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
+ ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
+ ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_restore_device_context - Restore hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx)
+{
+ struct cryp_register *reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Fall through for all items in switch statement. DES is captured in
+ * the default.
+ */
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ writel_relaxed(ctx->key_4_l, &reg->key_4_l);
+ writel_relaxed(ctx->key_4_r, &reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ writel_relaxed(ctx->key_3_l, &reg->key_3_l);
+ writel_relaxed(ctx->key_3_r, &reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ writel_relaxed(ctx->key_2_l, &reg->key_2_l);
+ writel_relaxed(ctx->key_2_r, &reg->key_2_r);
+
+ default:
+ writel_relaxed(ctx->key_1_l, &reg->key_1_l);
+ writel_relaxed(ctx->key_1_r, &reg->key_1_r);
+ }
+
+ /* Restore IV for CBC mode for AES and DES. */
+ if (config->algomode == CRYP_ALGO_TDES_CBC ||
+ config->algomode == CRYP_ALGO_DES_CBC ||
+ config->algomode == CRYP_ALGO_AES_CBC) {
+ writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
+ writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
+ writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
+ writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_write_indata - This routine writes 32 bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data word to write
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data)
+{
+ writel_relaxed(write_data, &device_data->base->din);
+
+ return 0;
+}
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data)
+{
+ *read_data = readl_relaxed(&device_data->base->dout);
+
+ return 0;
+}
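As a usage sketch of the key helpers above (mirroring what cfg_keys() in cryp_core.c later in this patch does), a 128-bit AES key that has already been swapped into the hardware's word order is loaded two key registers at a time. The helper name below is hypothetical and not part of the driver.

    /* Editor's sketch: load a pre-swapped AES-128 key (four 32-bit words)
     * into key registers 1 and 2. */
    static int load_aes128_key(struct cryp_device_data *device_data,
                               const u32 *swapped_key)
    {
            struct cryp_key_value kv;
            int i, ret;

            for (i = 0; i < 2; i++) {
                    kv.key_value_left = swapped_key[i * 2];
                    kv.key_value_right = swapped_key[i * 2 + 1];

                    ret = cryp_configure_key_values(device_data,
                                                    (enum cryp_key_reg_index)i,
                                                    kv);
                    if (ret)
                            return ret;
            }

            return 0;
    }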
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
new file mode 100644
index 00000000000..df2e25d4671
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -0,0 +1,308 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_H_
+#define _CRYP_H_
+
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/klist.h>
+#include <linux/mutex.h>
+
+#define DEV_DBG_NAME "crypX crypX:"
+
+/* CRYP enable/disable */
+enum cryp_crypen {
+ CRYP_CRYPEN_DISABLE = 0,
+ CRYP_CRYPEN_ENABLE = 1
+};
+
+/* CRYP Start Computation enable/disable */
+enum cryp_start {
+ CRYP_START_DISABLE = 0,
+ CRYP_START_ENABLE = 1
+};
+
+/* CRYP Init Signal enable/disable */
+enum cryp_init {
+ CRYP_INIT_DISABLE = 0,
+ CRYP_INIT_ENABLE = 1
+};
+
+/* Cryp State enable/disable */
+enum cryp_state {
+ CRYP_STATE_DISABLE = 0,
+ CRYP_STATE_ENABLE = 1
+};
+
+/* Key preparation bit enable */
+enum cryp_key_prep {
+ KSE_DISABLED = 0,
+ KSE_ENABLED = 1
+};
+
+/* Key size for AES */
+#define CRYP_KEY_SIZE_128 (0)
+#define CRYP_KEY_SIZE_192 (1)
+#define CRYP_KEY_SIZE_256 (2)
+
+/* AES modes */
+enum cryp_algo_mode {
+ CRYP_ALGO_TDES_ECB,
+ CRYP_ALGO_TDES_CBC,
+ CRYP_ALGO_DES_ECB,
+ CRYP_ALGO_DES_CBC,
+ CRYP_ALGO_AES_ECB,
+ CRYP_ALGO_AES_CBC,
+ CRYP_ALGO_AES_CTR,
+ CRYP_ALGO_AES_XTS
+};
+
+/* Cryp Encryption or Decryption */
+enum cryp_algorithm_dir {
+ CRYP_ALGORITHM_ENCRYPT,
+ CRYP_ALGORITHM_DECRYPT
+};
+
+/* Hardware access method */
+enum cryp_mode {
+ CRYP_MODE_POLLING,
+ CRYP_MODE_INTERRUPT,
+ CRYP_MODE_DMA
+};
+
+/**
+ * struct cryp_config -
+ * @keysize: Key size for AES
+ * @algomode: AES modes
+ * @algodir: Cryp Encryption or Decryption
+ *
+ * CRYP configuration structure to be passed to set configuration
+ */
+struct cryp_config {
+ int keysize;
+ enum cryp_algo_mode algomode;
+ enum cryp_algorithm_dir algodir;
+};
+
+/**
+ * struct cryp_protection_config -
+ * @privilege_access: Privileged cryp state enable/disable
+ * @secure_access: Secure cryp state enable/disable
+ *
+ * Protection configuration structure for setting privilege access
+ */
+struct cryp_protection_config {
+ enum cryp_state privilege_access;
+ enum cryp_state secure_access;
+};
+
+/* Cryp status */
+enum cryp_status_id {
+ CRYP_STATUS_BUSY = 0x10,
+ CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
+ CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
+ CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
+ CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
+};
+
+/* Cryp DMA interface */
+enum cryp_dma_req_type {
+ CRYP_DMA_DISABLE_BOTH,
+ CRYP_DMA_ENABLE_IN_DATA,
+ CRYP_DMA_ENABLE_OUT_DATA,
+ CRYP_DMA_ENABLE_BOTH_DIRECTIONS
+};
+
+enum cryp_dma_channel {
+ CRYP_DMA_RX = 0,
+ CRYP_DMA_TX
+};
+
+/* Key registers */
+enum cryp_key_reg_index {
+ CRYP_KEY_REG_1,
+ CRYP_KEY_REG_2,
+ CRYP_KEY_REG_3,
+ CRYP_KEY_REG_4
+};
+
+/* Key register left and right */
+struct cryp_key_value {
+ u32 key_value_left;
+ u32 key_value_right;
+};
+
+/* Cryp Initialization structure */
+enum cryp_init_vector_index {
+ CRYP_INIT_VECTOR_INDEX_0,
+ CRYP_INIT_VECTOR_INDEX_1
+};
+
+/* struct cryp_init_vector_value -
+ * @init_value_left: Left half of the initialization vector.
+ * @init_value_right: Right half of the initialization vector.
+ */
+struct cryp_init_vector_value {
+ u32 init_value_left;
+ u32 init_value_right;
+};
+
+/**
+ * struct cryp_device_context - structure for a cryp context.
+ * @cr: control register
+ * @dmacr: DMA control register
+ * @imsc: Interrupt mask set/clear register
+ * @key_1_l: Key 1l register
+ * @key_1_r: Key 1r register
+ * @key_2_l: Key 2l register
+ * @key_2_r: Key 2r register
+ * @key_3_l: Key 3l register
+ * @key_3_r: Key 3r register
+ * @key_4_l: Key 4l register
+ * @key_4_r: Key 4r register
+ * @init_vect_0_l: Initialization vector 0l register
+ * @init_vect_0_r: Initialization vector 0r register
+ * @init_vect_1_l: Initialization vector 1l register
+ * @init_vect_1_r: Initialization vector 1r register
+ * @din: Data in register
+ * @dout: Data out register
+ *
+ * CRYP power management specific structure.
+ */
+struct cryp_device_context {
+ u32 cr;
+ u32 dmacr;
+ u32 imsc;
+
+ u32 key_1_l;
+ u32 key_1_r;
+ u32 key_2_l;
+ u32 key_2_r;
+ u32 key_3_l;
+ u32 key_3_r;
+ u32 key_4_l;
+ u32 key_4_r;
+
+ u32 init_vect_0_l;
+ u32 init_vect_0_r;
+ u32 init_vect_1_l;
+ u32 init_vect_1_r;
+
+ u32 din;
+ u32 dout;
+};
+
+struct cryp_dma {
+ dma_cap_mask_t mask;
+ struct completion cryp_dma_complete;
+ struct dma_chan *chan_cryp2mem;
+ struct dma_chan *chan_mem2cryp;
+ struct stedma40_chan_cfg *cfg_cryp2mem;
+ struct stedma40_chan_cfg *cfg_mem2cryp;
+ int sg_src_len;
+ int sg_dst_len;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
+ int nents_src;
+ int nents_dst;
+};
+
+/**
+ * struct cryp_device_data - structure for a cryp device.
+ * @base: Pointer to the hardware base address.
+ * @dev: Pointer to the devices dev structure.
+ * @clk: Pointer to the device's clock control.
+ * @pwr_regulator: Pointer to the device's power control.
+ * @power_status: Current status of the power.
+ * @ctx_lock: Lock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @list_node: For inclusion into a klist.
+ * @dma: The dma structure holding channel configuration.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_spinlock: Spinlock for power_state.
+ * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
+ */
+struct cryp_device_data {
+ struct cryp_register __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct ux500_regulator *pwr_regulator;
+ int power_status;
+ struct spinlock ctx_lock;
+ struct cryp_ctx *current_ctx;
+ struct klist_node list_node;
+ struct cryp_dma dma;
+ bool power_state;
+ struct spinlock power_state_spinlock;
+ bool restore_dev_ctx;
+};
+
+void cryp_wait_until_done(struct cryp_device_data *device_data);
+
+/* Initialization functions */
+
+int cryp_check(struct cryp_device_data *device_data);
+
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen);
+
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
+
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register);
+
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req);
+
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value);
+
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value);
+
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config);
+
+/* Power management functions */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode);
+
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx);
+
+/* Data transfer and status bits. */
+int cryp_is_logic_busy(struct cryp_device_data *device_data);
+
+int cryp_get_status(struct cryp_device_data *device_data);
+
+/**
+ * cryp_write_indata - This routine writes 32 bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data to write.
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
+
+#endif /* _CRYP_H_ */
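To show how the configuration types above fit together, the sketch below fills struct cryp_config for AES-128 CBC encryption and derives a control word via cryp_set_configuration(). It is illustrative only; the real core driver (cryp_core.c below) seeds the control word with CRYP_CR_DEFAULT from cryp_p.h rather than zero.

    /* Editor's sketch: build the control word for AES-128 CBC encryption. */
    static int setup_aes128_cbc(struct cryp_device_data *device_data, u32 *cr)
    {
            struct cryp_config cfg = {
                    .keysize = CRYP_KEY_SIZE_128,
                    .algomode = CRYP_ALGO_AES_CBC,
                    .algodir = CRYP_ALGORITHM_ENCRYPT,
            };

            *cr = 0;

            /* Merges keysize, algomode and algodir into *cr; also runs the
             * key-preparation sequence when decrypting in AES ECB/CBC mode. */
            return cryp_set_configuration(device_data, &cfg, cr);
    }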
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
new file mode 100644
index 00000000000..5893abb57dc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -0,0 +1,2313 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/klist.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/semaphore.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+
+#include <plat/ste_dma40.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+#define CRYP_MAX_KEY_SIZE 32
+#define BYTES_PER_WORD 4
+
+static int cryp_mode;
+static atomic_t session_id;
+
+static struct stedma40_chan_cfg *mem_to_engine;
+static struct stedma40_chan_cfg *engine_to_mem;
+
+/**
+ * struct cryp_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct cryp_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+/**
+ * struct cryp_ctx - Crypto context
+ * @config: Crypto mode.
+ * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @keylen: Length of key.
+ * @iv: Pointer to initialization vector.
+ * @indata: Pointer to indata.
+ * @outdata: Pointer to outdata.
+ * @datalen: Length of indata.
+ * @outlen: Length of outdata.
+ * @blocksize: Size of blocks.
+ * @updated: Updated flag.
+ * @dev_ctx: Device dependent context.
+ * @device: Pointer to the device.
+ */
+struct cryp_ctx {
+ struct cryp_config config;
+ u8 key[CRYP_MAX_KEY_SIZE];
+ u32 keylen;
+ u8 *iv;
+ const u8 *indata;
+ u8 *outdata;
+ u32 datalen;
+ u32 outlen;
+ u32 blocksize;
+ u8 updated;
+ struct cryp_device_context dev_ctx;
+ struct cryp_device_data *device;
+ u32 session_id;
+};
+
+static struct cryp_driver_data driver_data;
+
+/**
+ * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
+ * @in: Data to convert.
+ */
+static inline u32 uint8p_to_uint32_be(u8 *in)
+{
+ return (u32)in[0]<<24 |
+ ((u32)in[1]<<16) |
+ ((u32)in[2]<<8) |
+ ((u32)in[3]);
+}
+
+/**
+ * swap_bits_in_byte - mirror the bits in a byte
+ * @b: the byte to be mirrored
+ *
+ * The bits are swapped the following way:
+ * Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
+ * nibble 2 (n2) bits 4-7.
+ *
+ * Nibble 1 (n1):
+ * (The "old" (moved) bit is replaced with a zero)
+ * 1. Move bit 6 and 7, 4 positions to the left.
+ * 2. Move bit 3 and 5, 2 positions to the left.
+ * 3. Move bit 1-4, 1 position to the left.
+ *
+ * Nibble 2 (n2):
+ * 1. Move bit 0 and 1, 4 positions to the right.
+ * 2. Move bit 2 and 4, 2 positions to the right.
+ * 3. Move bit 3-6, 1 position to the right.
+ *
+ * Combine the two nibbles to a complete and swapped byte.
+ */
+
+static inline u8 swap_bits_in_byte(u8 b)
+{
+#define R_SHIFT_4_MASK (0xc0) /* Bits 6 and 7, right shift 4 */
+#define R_SHIFT_2_MASK (0x28) /* (After right shift 4) Bits 3 and 5,
+ right shift 2 */
+#define R_SHIFT_1_MASK (0x1e) /* (After right shift 2) Bits 1-4,
+ right shift 1 */
+#define L_SHIFT_4_MASK (0x03) /* Bits 0 and 1, left shift 4 */
+#define L_SHIFT_2_MASK (0x14) /* (After left shift 4) Bits 2 and 4,
+ left shift 2 */
+#define L_SHIFT_1_MASK (0x78) /* (After left shift 1) Bits 3-6,
+ left shift 1 */
+
+ u8 n1;
+ u8 n2;
+
+ /* Swap most significant nibble */
+ /* Right shift 4, bits 6 and 7 */
+ n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
+ /* Right shift 2, bits 3 and 5 */
+ n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
+ /* Right shift 1, bits 1-4 */
+ n1 = (n1 & R_SHIFT_1_MASK) >> 1;
+
+ /* Swap least significant nibble */
+ /* Left shift 4, bits 0 and 1 */
+ n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
+ /* Left shift 2, bits 2 and 4 */
+ n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
+ /* Left shift 1, bits 3-6 */
+ n2 = (n2 & L_SHIFT_1_MASK) << 1;
+
+ return n1 | n2;
+}
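The mask gymnastics above implement a plain bit-order reversal within one byte. Below is a quick host-side cross-check (editor's addition, not part of the patch) against a straightforward reference implementation; mirror_byte() is a hypothetical stand-in for the driver routine.

    #include <assert.h>
    #include <stdint.h>

    /* Reference bit mirror: bit i of the input becomes bit (7 - i). */
    static uint8_t mirror_byte(uint8_t b)
    {
            uint8_t out = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (b & (1u << i))
                            out |= 1u << (7 - i);
            return out;
    }

    int main(void)
    {
            /* 0xb4 = 1011 0100b reversed is 0010 1101b = 0x2d, which is
             * also what swap_bits_in_byte(0xb4) above evaluates to. */
            assert(mirror_byte(0xb4) == 0x2d);
            return 0;
    }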
+
+static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
+ u8 *out, u32 len)
+{
+ unsigned int i = 0;
+ int j;
+ int index = 0;
+
+ j = len - BYTES_PER_WORD;
+ while (j >= 0) {
+ for (i = 0; i < BYTES_PER_WORD; i++) {
+ index = len - j - BYTES_PER_WORD + i;
+ out[j + i] =
+ swap_bits_in_byte(in[index]);
+ }
+ j -= BYTES_PER_WORD;
+ }
+}
+
+static void add_session_id(struct cryp_ctx *ctx)
+{
+ /*
+ * We never want 0 to be a valid value, since this is the default value
+ * for the software context.
+ */
+ if (unlikely(atomic_inc_and_test(&session_id)))
+ atomic_inc(&session_id);
+
+ ctx->session_id = atomic_read(&session_id);
+}
+
+static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+{
+ struct cryp_ctx *ctx;
+ int i;
+ struct cryp_device_data *device_data;
+
+ if (param == NULL) {
+ BUG_ON(!param);
+ return IRQ_HANDLED;
+ }
+
+ /* The device is coming from the one found in hw_crypt_noxts. */
+ device_data = (struct cryp_device_data *)param;
+
+ ctx = device_data->current_ctx;
+
+ if (ctx == NULL) {
+ BUG_ON(!ctx);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
+ cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
+ "out" : "in");
+
+ if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ if (ctx->outlen / ctx->blocksize > 0) {
+ for (i = 0; i < ctx->blocksize / 4; i++) {
+ cryp_read_outdata(device_data,
+ (u32 *)ctx->outdata);
+ ctx->outdata += 4;
+ ctx->outlen -= 4;
+ }
+
+ if (ctx->outlen == 0) {
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+ }
+ }
+ } else if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO)) {
+ if (ctx->datalen / ctx->blocksize > 0) {
+ for (i = 0 ; i < ctx->blocksize / 4; i++) {
+ cryp_write_indata(device_data,
+ *((u32 *)ctx->indata));
+ ctx->indata += 4;
+ ctx->datalen -= 4;
+ }
+
+ if (ctx->datalen == 0)
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO);
+
+ if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
+ CRYP_PUT_BITS(&device_data->base->cr,
+ CRYP_START_ENABLE,
+ CRYP_CR_START_POS,
+ CRYP_CR_START_MASK);
+
+ cryp_wait_until_done(device_data);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mode_is_aes(enum cryp_algo_mode mode)
+{
+ return (CRYP_ALGO_AES_ECB == mode) ||
+ (CRYP_ALGO_AES_CBC == mode) ||
+ (CRYP_ALGO_AES_CTR == mode) ||
+ (CRYP_ALGO_AES_XTS == mode);
+}
+
+static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
+ enum cryp_init_vector_index index)
+{
+ struct cryp_init_vector_value vector_value;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ vector_value.init_value_left = left;
+ vector_value.init_value_right = right;
+
+ return cryp_configure_init_vector(device_data,
+ index,
+ vector_value);
+}
+
+static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
+{
+ int i;
+ int status = 0;
+ int num_of_regs = ctx->blocksize / 8;
+ u32 iv[AES_BLOCK_SIZE / 4];
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ /*
+ * Since we loop on num_of_regs we need to have a check in case
+ * someone provides an incorrect blocksize which would force calling
+ * cfg_iv with i greater than 2 which is an error.
+ */
+ if (num_of_regs > 2) {
+ dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
+ __func__, ctx->blocksize);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ctx->blocksize / 4; i++)
+ iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+
+ for (i = 0; i < num_of_regs; i++) {
+ status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
+ (enum cryp_init_vector_index) i);
+ if (status != 0)
+ return status;
+ }
+ return status;
+}
+
+static int set_key(struct cryp_device_data *device_data,
+ u32 left_key,
+ u32 right_key,
+ enum cryp_key_reg_index index)
+{
+ struct cryp_key_value key_value;
+ int cryp_error;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ key_value.key_value_left = left_key;
+ key_value.key_value_right = right_key;
+
+ cryp_error = cryp_configure_key_values(device_data,
+ index,
+ key_value);
+ if (cryp_error != 0)
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_configure_key_values() failed!", __func__);
+
+ return cryp_error;
+}
+
+static int cfg_keys(struct cryp_ctx *ctx)
+{
+ int i;
+ int num_of_regs = ctx->keylen / 8;
+ u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+ int cryp_error = 0;
+
+ dev_dbg(ctx->device->dev, "[%s]", __func__);
+
+ if (mode_is_aes(ctx->config.algomode)) {
+ swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+ (u8 *)swapped_key,
+ ctx->keylen);
+ } else {
+ for (i = 0; i < ctx->keylen / 4; i++)
+ swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+ }
+
+ for (i = 0; i < num_of_regs; i++) {
+ cryp_error = set_key(ctx->device,
+ *(((u32 *)swapped_key)+i*2),
+ *(((u32 *)swapped_key)+i*2+1),
+ (enum cryp_key_reg_index) i);
+
+ if (cryp_error != 0) {
+ dev_err(ctx->device->dev, "[%s]: set_key() failed!",
+ __func__);
+ return cryp_error;
+ }
+ }
+ return cryp_error;
+}
+
+static int cryp_setup_context(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ u32 control_register = CRYP_CR_DEFAULT;
+
+ switch (cryp_mode) {
+ case CRYP_MODE_INTERRUPT:
+ writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
+ break;
+
+ case CRYP_MODE_DMA:
+ writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ctx->updated == 0) {
+ cryp_flush_inoutfifo(device_data);
+ if (cfg_keys(ctx) != 0) {
+ dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
+ __func__);
+ return -EPERM;
+ }
+
+ if ((ctx->iv) &&
+ (CRYP_ALGO_AES_ECB != ctx->config.algomode) &&
+ (CRYP_ALGO_DES_ECB != ctx->config.algomode) &&
+ (CRYP_ALGO_TDES_ECB != ctx->config.algomode)) {
+ if (cfg_ivs(device_data, ctx) != 0)
+ return -EPERM;
+ }
+
+ cryp_set_configuration(device_data, &ctx->config,
+ &control_register);
+ add_session_id(ctx);
+ } else if (ctx->updated == 1 &&
+ ctx->session_id != atomic_read(&session_id)) {
+ cryp_flush_inoutfifo(device_data);
+ cryp_restore_device_context(device_data, &ctx->dev_ctx);
+
+ add_session_id(ctx);
+ control_register = ctx->dev_ctx.cr;
+ } else
+ control_register = ctx->dev_ctx.cr;
+
+ writel(control_register |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
+ &device_data->base->cr);
+
+ return 0;
+}
+
+static int cryp_get_device_data(struct cryp_ctx *ctx,
+ struct cryp_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct cryp_device_data *local_device_data = NULL;
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct cryp_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /**
+ * No free device found.
+ * Since we allocated a device with down_interruptible, this
+ * should not be able to happen.
+ * Number of available devices, which are contained in
+ * device_allocation, is therefore decremented by not doing
+ * an up(device_allocation).
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
+
+static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
+ struct device *dev)
+{
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2cryp = mem_to_engine;
+ device_data->dma.chan_mem2cryp =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_mem2cryp);
+
+ device_data->dma.cfg_cryp2mem = engine_to_mem;
+ device_data->dma.chan_cryp2mem =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_cryp2mem);
+
+ init_completion(&device_data->dma.cryp_dma_complete);
+}
+
+static void cryp_dma_out_callback(void *data)
+{
+ struct cryp_ctx *ctx = (struct cryp_ctx *) data;
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ complete(&ctx->device->dma.cryp_dma_complete);
+}
+
+static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+ struct scatterlist *sg,
+ int len,
+ enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
+ dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
+ "aligned! Addr: 0x%08x", __func__, (u32)sg);
+ return -EFAULT;
+ }
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ channel = ctx->device->dma.chan_mem2cryp;
+ ctx->device->dma.sg_src = sg;
+ ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.nents_src,
+ direction);
+
+ if (!ctx->device->dma.sg_src_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len,
+ direction,
+ DMA_CTRL_ACK);
+ break;
+
+ case DMA_FROM_DEVICE:
+ channel = ctx->device->dma.chan_cryp2mem;
+ ctx->device->dma.sg_dst = sg;
+ ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.nents_dst,
+ direction);
+
+ if (!ctx->device->dma.sg_dst_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list "
+ "(FROM_DEVICE)", __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(FROM_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len,
+ direction,
+ DMA_CTRL_ACK |
+ DMA_PREP_INTERRUPT);
+
+ desc->callback = cryp_dma_out_callback;
+ desc->callback_param = ctx;
+ break;
+
+ default:
+ dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void cryp_dma_done(struct cryp_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ chan = ctx->device->dma.chan_mem2cryp;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+
+ chan = ctx->device->dma.chan_cryp2mem;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+}
+
+static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
+ int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static void cryp_polling_mode(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int len = ctx->blocksize / BYTES_PER_WORD;
+ int remaining_length = ctx->datalen;
+ u32 *indata = (u32 *)ctx->indata;
+ u32 *outdata = (u32 *)ctx->outdata;
+
+ while (remaining_length > 0) {
+ writesl(&device_data->base->din, indata, len);
+ indata += len;
+ remaining_length -= (len * BYTES_PER_WORD);
+ cryp_wait_until_done(device_data);
+
+ readsl(&device_data->base->dout, outdata, len);
+ outdata += len;
+ cryp_wait_until_done(device_data);
+ }
+}
+
+static int cryp_disable_power(struct device *dev,
+ struct cryp_device_data *device_data,
+ bool save_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state)
+ goto out;
+
+ spin_lock(&device_data->ctx_lock);
+ if (save_device_context && device_data->current_ctx) {
+ cryp_save_device_context(device_data,
+ &device_data->current_ctx->dev_ctx,
+ cryp_mode);
+ device_data->restore_dev_ctx = true;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ clk_disable(device_data->clk);
+ ret = ux500_regulator_atomic_disable(device_data->pwr_regulator);
+ if (ret)
+ dev_err(dev, "[%s]: "
+ "regulator_disable() failed!",
+ __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int cryp_enable_power(
+ struct device *dev,
+ struct cryp_device_data *device_data,
+ bool restore_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state) {
+ ret = ux500_regulator_atomic_enable(device_data->pwr_regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ux500_regulator_atomic_disable(
+ device_data->pwr_regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_ctx) {
+ spin_lock(&device_data->ctx_lock);
+ if (restore_device_context && device_data->current_ctx) {
+ device_data->restore_dev_ctx = false;
+ cryp_restore_device_context(device_data,
+ &device_data->current_ctx->dev_ctx);
+ }
+ spin_unlock(&device_data->ctx_lock);
+ }
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int hw_crypt_noxts(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int ret = 0;
+
+ const u8 *indata = ctx->indata;
+ u8 *outdata = ctx->outdata;
+ u32 datalen = ctx->datalen;
+ u32 outlen = datalen;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->outlen = ctx->datalen;
+
+ if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
+ pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)indata);
+ return -EINVAL;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+
+ if (ret)
+ goto out;
+
+ if (cryp_mode == CRYP_MODE_INTERRUPT) {
+ cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+
+ /*
+ * ctx->outlen is decremented in the cryp_interrupt_handler
+ * function. We had to add cpu_relax() (barrier) to make sure
+ * that gcc didn't optimize away this variable.
+ */
+ while (ctx->outlen > 0)
+ cpu_relax();
+ } else if (cryp_mode == CRYP_MODE_POLLING ||
+ cryp_mode == CRYP_MODE_DMA) {
+ /*
+ * The reason for handling DMA in this if case is that when
+ * cryp_mode is CRYP_MODE_DMA we use separate DMA routines for
+ * cipher/plaintext larger than the blocksize, but for the normal
+ * CRYPTO_ALG_TYPE_CIPHER path we still use polling mode, since
+ * the overhead of setting up DMA eats up the benefit of using
+ * it.
+ */
+ cryp_polling_mode(ctx, device_data);
+ } else {
+ dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out:
+ ctx->indata = indata;
+ ctx->outdata = outdata;
+ ctx->datalen = datalen;
+ ctx->outlen = outlen;
+
+ return ret;
+}
+
+static int get_nents(struct scatterlist *sg, int nbytes)
+{
+ int nents = 0;
+
+ while (nbytes > 0) {
+ nbytes -= sg->length;
+ sg = scatterwalk_sg_next(sg);
+ nents++;
+ }
+
+ return nents;
+}
+
+static int ablk_dma_crypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+
+ int bytes_written = 0;
+ int bytes_read = 0;
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->datalen = areq->nbytes;
+ ctx->outlen = areq->nbytes;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+ if (ret)
+ goto out_power;
+
+ /* We have the device now, so store the nents in the dma struct. */
+ ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
+ ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
+
+ /* Enable DMA in- and output. */
+ cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
+
+ bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
+ bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
+
+ wait_for_completion(&ctx->device->dma.cryp_dma_complete);
+ cryp_dma_done(ctx);
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out_power:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+
+out:
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ if (unlikely(bytes_written != bytes_read))
+ return -EPERM;
+
+ return 0;
+}
+
+static int ablk_crypt(struct ablkcipher_request *areq)
+{
+ struct ablkcipher_walk walk;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+ unsigned long src_paddr;
+ unsigned long dst_paddr;
+ int ret;
+ int nbytes;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ goto out;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out_power;
+ }
+
+ ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
+ ret = ablkcipher_walk_phys(areq, &walk);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ while ((nbytes = walk.nbytes) > 0) {
+ ctx->iv = walk.iv;
+ src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ ctx->indata = phys_to_virt(src_paddr);
+
+ dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ ctx->outdata = phys_to_virt(dst_paddr);
+
+ ctx->datalen = nbytes - (nbytes % ctx->blocksize);
+
+ ret = hw_crypt_noxts(ctx, device_data);
+ if (ret)
+ goto out_power;
+
+ nbytes -= ctx->datalen;
+ ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ if (ret)
+ goto out_power;
+ }
+ ablkcipher_walk_complete(&walk);
+
+out_power:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+out:
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+	 * The semaphore was taken with down_interruptible() in
+	 * cryp_get_device_data(), so release it here.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ break;
+
+ case AES_KEYSIZE_192:
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ break;
+
+ case AES_KEYSIZE_256:
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+ break;
+
+ default:
+ pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+
+ return 0;
+}
+
+static int aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)key, 4))) {
+ dev_err(ctx->device->dev, "[%s]: key isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)key);
+ return -EFAULT;
+ }
+
+ /* For CTR mode */
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s] invalid keylen", __func__);
+ return -EINVAL;
+ }
+
+ if (keylen == AES_KEYSIZE_128)
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ else if (keylen == AES_KEYSIZE_192)
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ else if (keylen == AES_KEYSIZE_256)
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+	/*
+	 * Check key interdependency: K1 == K2 or K2 == K3 degrades 3DES to
+	 * single DES, so such keys are rejected as weak when requested.
+	 */
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int cryp_hw_calculate(struct cryp_ctx *ctx)
+{
+ struct cryp_device_data *device_data;
+ int ret;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ goto out;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (hw_crypt_noxts(ctx, device_data))
+ dev_err(device_data->dev, "[%s]: hw_crypt_noxts() failed!",
+ __func__);
+
+out:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+	 * The semaphore was taken with down_interruptible() in
+	 * cryp_get_device_data(), so release it here.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static int aes_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non-DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non-DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CBC;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+	/* Use DMA only for ablkcipher requests, since givcipher is not yet supported. */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non-DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CBC;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+	/* Use DMA only for ablkcipher requests, since givcipher is not yet supported. */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non-DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ctr_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CTR;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+	/* Use DMA only for ablkcipher requests, since givcipher is not yet supported. */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non-DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ctr_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CTR;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+	/* Use DMA only for ablkcipher requests, since givcipher is not yet supported. */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non-DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int des_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_CBC;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_CBC;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_CBC;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_CBC;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+	 * Run the non-DMA version even in DMA mode, since DMA does not
+	 * currently work for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+/**
+ * struct crypto_alg aes_alg
+ */
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_setkey,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_alg
+ */
+static struct crypto_alg des_alg = {
+ .cra_name = "des",
+ .cra_driver_name = "des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES_KEY_SIZE,
+ .cia_max_keysize = DES_KEY_SIZE,
+ .cia_setkey = des_setkey,
+ .cia_encrypt = des_encrypt,
+ .cia_decrypt = des_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_alg
+ */
+static struct crypto_alg des3_alg = {
+ .cra_name = "des3_ede",
+ .cra_driver_name = "des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES3_EDE_KEY_SIZE,
+ .cia_max_keysize = DES3_EDE_KEY_SIZE,
+ .cia_setkey = des3_setkey,
+ .cia_encrypt = des3_encrypt,
+ .cia_decrypt = des3_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_ecb_alg
+ */
+static struct crypto_alg aes_ecb_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_ecb_encrypt,
+ .decrypt = aes_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_cbc_alg
+ */
+static struct crypto_alg aes_cbc_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_cbc_encrypt,
+ .decrypt = aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_ctr_alg
+ */
+static struct crypto_alg aes_ctr_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_ctr_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_ctr_encrypt,
+ .decrypt = aes_ctr_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_ecb_alg
+ */
+static struct crypto_alg des_ecb_alg = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = des_ecb_encrypt,
+ .decrypt = des_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_cbc_alg
+ */
+static struct crypto_alg des_cbc_alg = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = des_cbc_encrypt,
+ .decrypt = des_cbc_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_ecb_alg
+ */
+static struct crypto_alg des3_ecb_alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = des3_ecb_encrypt,
+ .decrypt = des3_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_cbc_alg
+ */
+static struct crypto_alg des3_cbc_alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = des3_cbc_encrypt,
+ .decrypt = des3_cbc_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg *ux500_cryp_algs[] - All algorithms registered by this driver.
+ */
+static struct crypto_alg *ux500_cryp_algs[] = {
+ &aes_alg,
+ &des_alg,
+ &des3_alg,
+ &aes_ecb_alg,
+ &aes_cbc_alg,
+ &aes_ctr_alg,
+ &des_ecb_alg,
+ &des_cbc_alg,
+ &des3_ecb_alg,
+ &des3_cbc_alg,
+};
+
+/**
+ * cryp_algs_register_all - Register all supported algorithms with the crypto API.
+ */
+static int cryp_algs_register_all(void)
+{
+ int ret;
+ int i;
+ int count;
+
+ pr_debug("[%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) {
+ ret = crypto_register_alg(ux500_cryp_algs[i]);
+ if (ret) {
+ count = i;
+ pr_err("[%s] alg registration failed",
+ ux500_cryp_algs[i]->cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_alg(ux500_cryp_algs[i]);
+ return ret;
+}
+
+/**
+ * cryp_algs_unregister_all - Unregister all previously registered algorithms.
+ */
+static void cryp_algs_unregister_all(void)
+{
+ int i;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++)
+ crypto_unregister_alg(ux500_cryp_algs[i]);
+}
+
+static int ux500_cryp_probe(struct platform_device *pdev)
+{
+ int ret;
+ int cryp_error = 0;
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+ struct cryp_protection_config prot = {
+ .privilege_access = CRYP_STATE_ENABLE
+ };
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "[%s]", __func__);
+ device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_err(dev, "[%s]: kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ /* Grab the DMA configuration from platform data. */
+ mem_to_engine = &((struct cryp_platform_data *)
+ dev->platform_data)->mem_to_engine;
+ engine_to_mem = &((struct cryp_platform_data *)
+ dev->platform_data)->engine_to_mem;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "[%s]: platform_get_resource() failed",
+ __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_err(dev, "[%s]: request_mem_region() failed",
+ __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s]: ioremap failed!", __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_spinlock);
+
+ /* Enable power for CRYP hardware block */
+ device_data->pwr_regulator = ux500_regulator_get(&pdev->dev);
+ if (IS_ERR(device_data->pwr_regulator)) {
+ dev_err(dev, "[%s]: could not get cryp regulator", __func__);
+ ret = PTR_ERR(device_data->pwr_regulator);
+ device_data->pwr_regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clk for CRYP hardware block */
+ device_data->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s]: clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ cryp_error = cryp_check(device_data);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_init() failed!", __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ cryp_error = cryp_configure_protection(device_data, &prot);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_configure_protection() failed!",
+ __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res_irq) {
+		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
+			__func__);
+		ret = -ENODEV;
+		goto out_power;
+	}
+
+ ret = request_irq(res_irq->start,
+ cryp_interrupt_handler,
+ 0,
+ "cryp1",
+ device_data);
+ if (ret) {
+ dev_err(dev, "[%s]: Unable to request IRQ", __func__);
+ goto out_power;
+ }
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ atomic_set(&session_id, 1);
+
+ ret = cryp_algs_register_all();
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__);
+
+ return 0;
+
+out_power:
+ cryp_disable_power(&pdev->dev, device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ ux500_regulator_put(device_data->pwr_regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+static int ux500_cryp_remove(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ ux500_regulator_put(device_data->pwr_regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, res->end - res->start + 1);
+
+ kfree(device_data);
+
+ return 0;
+}
+
+static void ux500_cryp_shutdown(struct platform_device *pdev)
+{
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use!"
+ "Shutting down anyway...", __func__);
+		/*
+		 * Allocate the device by setting current_ctx to a non-NULL
+		 * (dummy) value, so that it cannot be picked up and used
+		 * during a context switch.
+		 */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+}
+
+static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ /* Handle state? */
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else
+ disable_irq(res_irq->start);
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
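+	/*
+	 * ++temp_ctx reproduces the dummy non-NULL value (NULL incremented by
+	 * one element) that current_ctx was set to above, so this comparison
+	 * distinguishes "no real context" from an active one.
+	 */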
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = cryp_disable_power(&pdev->dev, device_data, false);
+
+ } else
+ ret = cryp_disable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
+
+ return ret;
+}
+
+static int ux500_cryp_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = cryp_enable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
+ __func__);
+ else {
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ enable_irq(res_irq->start);
+ }
+
+ return ret;
+}
+
+static struct platform_driver cryp_driver = {
+ .probe = ux500_cryp_probe,
+ .remove = ux500_cryp_remove,
+ .shutdown = ux500_cryp_shutdown,
+ .suspend = ux500_cryp_suspend,
+ .resume = ux500_cryp_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cryp1"
+ }
+};
+
+static int __init ux500_cryp_mod_init(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+ return platform_driver_register(&cryp_driver);
+}
+
+static void __exit ux500_cryp_mod_fini(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ platform_driver_unregister(&cryp_driver);
+ return;
+}
+
+module_init(ux500_cryp_mod_init);
+module_exit(ux500_cryp_mod_fini);
+
+module_param(cryp_mode, int, 0);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
+MODULE_ALIAS("aes-all");
+MODULE_ALIAS("des-all");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
new file mode 100644
index 00000000000..08d291cdbe6
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+
+#include "cryp.h"
+#include "cryp_p.h"
+#include "cryp_irq.h"
+#include "cryp_irqp.h"
+
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i | irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i & ~irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
+}
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
new file mode 100644
index 00000000000..5a7837f1b8f
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_IRQ_H_
+#define _CRYP_IRQ_H_
+
+#include "cryp.h"
+
+enum cryp_irq_src_id {
+ CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
+ CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
+ CRYP_IRQ_SRC_ALL = 0x3
+};
+
+/**
+ * M0 Functions
+ */
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
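+
+/*
+ * Typical usage (illustration only): a source is enabled with
+ * cryp_enable_irq_src() when a transfer is started, the interrupt handler
+ * uses cryp_pending_irq_src() to find out which FIFO raised the interrupt,
+ * and cryp_disable_irq_src() masks the source again once it has been
+ * serviced.
+ */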
+
+#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 00000000000..8b339cc34bf
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CRYP_IRQP_H_
+#define __CRYP_IRQP_H_
+
+#include "cryp_irq.h"
+
+/**
+ *
+ * CRYP Registers - Offset mapping
+ * +-----------------+
+ * 00h | CRYP_CR | Configuration register
+ * +-----------------+
+ * 04h | CRYP_SR | Status register
+ * +-----------------+
+ * 08h | CRYP_DIN | Data In register
+ * +-----------------+
+ * 0ch | CRYP_DOUT | Data out register
+ * +-----------------+
+ * 10h | CRYP_DMACR | DMA control register
+ * +-----------------+
+ * 14h | CRYP_IMSC | IMSC
+ * +-----------------+
+ * 18h | CRYP_RIS | Raw interrupt status
+ * +-----------------+
+ * 1ch | CRYP_MIS | Masked interrupt status.
+ * +-----------------+
+ * Key registers
+ * IVR registers
+ * Peripheral
+ * Cell IDs
+ *
+ * Refer to struct cryp_register below for the remaining register map.
+ */
+
+/**
+ * struct cryp_register
+ * @cr - Configuration register
+ * @sr - Status register
+ * @din - Data input register
+ * @din_size - Data input size register
+ * @dout - Data output register
+ * @dout_size - Data output size register
+ * @dmacr - Dma control register
+ * @imsc - Interrupt mask set/clear register
+ * @ris - Raw interrupt status
+ * @mis - Masked interrupt status register
+ * @key_1_l - Key register 1 L
+ * @key_1_r - Key register 1 R
+ * @key_2_l - Key register 2 L
+ * @key_2_r - Key register 2 R
+ * @key_3_l - Key register 3 L
+ * @key_3_r - Key register 3 R
+ * @key_4_l - Key register 4 L
+ * @key_4_r - Key register 4 R
+ * @init_vect_0_l - init vector 0 L
+ * @init_vect_0_r - init vector 0 R
+ * @init_vect_1_l - init vector 1 L
+ * @init_vect_1_r - init vector 1 R
+ * @cryp_unused1 - unused registers
+ * @itcr - Integration test control register
+ * @itip - Integration test input register
+ * @itop - Integration test output register
+ * @cryp_unused2 - unused registers
+ * @periphId0 - FE0 CRYP Peripheral Identification Register
+ * @periphId1 - FE4
+ * @periphId2 - FE8
+ * @periphId3 - FEC
+ * @pcellId0 - FF0 CRYP PCell Identification Register
+ * @pcellId1 - FF4
+ * @pcellId2 - FF8
+ * @pcellId3 - FFC
+ */
+struct cryp_register {
+ u32 cr; /* Configuration register */
+ u32 sr; /* Status register */
+ u32 din; /* Data input register */
+ u32 din_size; /* Data input size register */
+ u32 dout; /* Data output register */
+ u32 dout_size; /* Data output size register */
+ u32 dmacr; /* Dma control register */
+ u32 imsc; /* Interrupt mask set/clear register */
+ u32 ris; /* Raw interrupt status */
+	u32 mis;	/* Masked interrupt status register */
+
+	u32 key_1_l;	/* Key register 1 L */
+	u32 key_1_r;	/* Key register 1 R */
+	u32 key_2_l;	/* Key register 2 L */
+	u32 key_2_r;	/* Key register 2 R */
+	u32 key_3_l;	/* Key register 3 L */
+	u32 key_3_r;	/* Key register 3 R */
+	u32 key_4_l;	/* Key register 4 L */
+	u32 key_4_r;	/* Key register 4 R */
+
+	u32 init_vect_0_l;	/* init vector 0 L */
+	u32 init_vect_0_r;	/* init vector 0 R */
+	u32 init_vect_1_l;	/* init vector 1 L */
+	u32 init_vect_1_r;	/* init vector 1 R */
+
+	u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)];	/* unused registers */
+	u32 itcr;	/* Integration test control register */
+	u32 itip;	/* Integration test input register */
+	u32 itop;	/* Integration test output register */
+ u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
+
+	u32 periphId0;	/* FE0 CRYP Peripheral Identification Register */
+ u32 periphId1; /* FE4 */
+ u32 periphId2; /* FE8 */
+ u32 periphId3; /* FEC */
+
+	u32 pcellId0;	/* FF0 CRYP PCell Identification Register */
+ u32 pcellId1; /* FF4 */
+ u32 pcellId2; /* FF8 */
+ u32 pcellId3; /* FFC */
+};
+
+#endif
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 00000000000..0e070829edc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_P_H_
+#define _CRYP_P_H_
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include "cryp.h"
+#include "cryp_irqp.h"
+
+/**
+ * Generic Macros
+ */
+#define CRYP_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define CRYP_WRITE_BIT(reg_name, val, mask) \
+ writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
+ ((val) & (mask))), reg_name)
+
+#define CRYP_TEST_BITS(reg_name, val) \
+ (readl_relaxed(reg_name) & (val))
+
+#define CRYP_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
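+
+/*
+ * Usage illustration (not part of the driver logic): programming the key
+ * size field of the configuration register with the helpers above would
+ * look like
+ *
+ *	CRYP_PUT_BITS(&device_data->base->cr, CRYP_KEY_SIZE_128,
+ *		      CRYP_CR_KEYSIZE_POS, CRYP_CR_KEYSIZE_MASK);
+ *
+ * i.e. read the register, clear the masked field and write back the
+ * shifted value with a relaxed MMIO access.
+ */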
+
+/**
+ * CRYP specific Macros
+ */
+#define CRYP_PERIPHERAL_ID0 0xE3
+#define CRYP_PERIPHERAL_ID1 0x05
+
+#define CRYP_PERIPHERAL_ID2_DB8500 0x28
+#define CRYP_PERIPHERAL_ID2_DB5500 0x29
+#define CRYP_PERIPHERAL_ID3 0x00
+
+#define CRYP_PCELL_ID0 0x0D
+#define CRYP_PCELL_ID1 0xF0
+#define CRYP_PCELL_ID2 0x05
+#define CRYP_PCELL_ID3 0xB1
+
+/**
+ * CRYP register default values
+ */
+#define MAX_DEVICE_SUPPORT 2
+
+/* By default: priv set, keyrden set and 8-bit swapped datatype selected. */
+#define CRYP_CR_DEFAULT 0x0482
+#define CRYP_DMACR_DEFAULT 0x0
+#define CRYP_IMSC_DEFAULT 0x0
+#define CRYP_DIN_DEFAULT 0x0
+#define CRYP_DOUT_DEFAULT 0x0
+#define CRYP_KEY_DEFAULT 0x0
+#define CRYP_INIT_VECT_DEFAULT 0x0
+
+/**
+ * CRYP Control register specific mask
+ */
+#define CRYP_CR_SECURE_MASK BIT(0)
+#define CRYP_CR_PRLG_MASK BIT(1)
+#define CRYP_CR_ALGODIR_MASK BIT(2)
+#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3))
+#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6))
+#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8))
+#define CRYP_CR_KEYRDEN_MASK BIT(10)
+#define CRYP_CR_KSE_MASK BIT(11)
+#define CRYP_CR_START_MASK BIT(12)
+#define CRYP_CR_INIT_MASK BIT(13)
+#define CRYP_CR_FFLUSH_MASK BIT(14)
+#define CRYP_CR_CRYPEN_MASK BIT(15)
+#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\
+ CRYP_CR_PRLG_MASK |\
+ CRYP_CR_ALGODIR_MASK |\
+ CRYP_CR_ALGOMODE_MASK |\
+ CRYP_CR_DATATYPE_MASK |\
+ CRYP_CR_KEYSIZE_MASK |\
+ CRYP_CR_KEYRDEN_MASK |\
+ CRYP_CR_DATATYPE_MASK)
+
+
+#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1))
+#define CRYP_SR_IFEM_MASK BIT(0)
+#define CRYP_SR_BUSY_MASK BIT(4)
+
+/**
+ * Bit position used while setting bits in register
+ */
+#define CRYP_CR_PRLG_POS 1
+#define CRYP_CR_ALGODIR_POS 2
+#define CRYP_CR_ALGOMODE_POS 3
+#define CRYP_CR_DATATYPE_POS 6
+#define CRYP_CR_KEYSIZE_POS 8
+#define CRYP_CR_KEYRDEN_POS 10
+#define CRYP_CR_KSE_POS 11
+#define CRYP_CR_START_POS 12
+#define CRYP_CR_INIT_POS 13
+#define CRYP_CR_CRYPEN_POS 15
+
+#define CRYP_SR_BUSY_POS 4
+
+/**
+ * CRYP DMA request bit masks
+ */
+#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0))
+#define CRYP_DMA_REQ_MASK_POS 0
+
+
+struct cryp_system_context {
+ /* CRYP Register structure */
+ struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
+};
+
+#endif
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 00000000000..b2f90d9bac7
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_hash_core.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
+ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 00000000000..299f0bacc2c
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen (shujuan.chen@stericsson.com)
+ * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
+ * Author: Berne Hebark (berne.hebark@stericsson.com))
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _HASH_ALG_H
+#define _HASH_ALG_H
+
+#include <linux/bitops.h>
+
+#define HASH_BLOCK_SIZE 64
+
+/* Maximum value of the length's high word */
+#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
+
+/* Power on Reset values HASH registers */
+#define HASH_RESET_CR_VALUE 0x0
+#define HASH_RESET_STR_VALUE 0x0
+
+/* Number of context swap registers */
+#define HASH_CSR_COUNT 52
+
+#define HASH_RESET_CSRX_REG_VALUE 0x0
+#define HASH_RESET_CSFULL_REG_VALUE 0x0
+#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
+
+#define HASH_RESET_INDEX_VAL 0x0
+#define HASH_RESET_BIT_INDEX_VAL 0x0
+#define HASH_RESET_BUFFER_VAL 0x0
+#define HASH_RESET_LEN_HIGH_VAL 0x0
+#define HASH_RESET_LEN_LOW_VAL 0x0
+
+/* Control register bitfields */
+#define HASH_CR_RESUME_MASK 0x11FCF
+
+#define HASH_CR_SWITCHON_POS 31
+#define HASH_CR_SWITCHON_MASK BIT(31)
+
+#define HASH_CR_EMPTYMSG_POS 20
+#define HASH_CR_EMPTYMSG_MASK BIT(20)
+
+#define HASH_CR_DINF_POS 12
+#define HASH_CR_DINF_MASK BIT(12)
+
+#define HASH_CR_NBW_POS 8
+#define HASH_CR_NBW_MASK 0x00000F00UL
+
+#define HASH_CR_LKEY_POS 16
+#define HASH_CR_LKEY_MASK BIT(16)
+
+#define HASH_CR_ALGO_POS 7
+#define HASH_CR_ALGO_MASK BIT(7)
+
+#define HASH_CR_MODE_POS 6
+#define HASH_CR_MODE_MASK BIT(6)
+
+#define HASH_CR_DATAFORM_POS 4
+#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
+
+#define HASH_CR_DMAE_POS 3
+#define HASH_CR_DMAE_MASK BIT(3)
+
+#define HASH_CR_INIT_POS 2
+#define HASH_CR_INIT_MASK BIT(2)
+
+#define HASH_CR_PRIVN_POS 1
+#define HASH_CR_PRIVN_MASK BIT(1)
+
+#define HASH_CR_SECN_POS 0
+#define HASH_CR_SECN_MASK BIT(0)
+
+/* Start register bitfields */
+#define HASH_STR_DCAL_POS 8
+#define HASH_STR_DCAL_MASK BIT(8)
+#define HASH_STR_DEFAULT 0x0
+
+#define HASH_STR_NBLW_POS 0
+#define HASH_STR_NBLW_MASK 0x0000001FUL
+
+#define HASH_NBLW_MAX_VAL 0x1F
+
+/* PrimeCell IDs */
+#define HASH_P_ID0 0xE0
+#define HASH_P_ID1 0x05
+#define HASH_P_ID2 0x38
+#define HASH_P_ID3 0x00
+#define HASH_CELL_ID0 0x0D
+#define HASH_CELL_ID1 0xF0
+#define HASH_CELL_ID2 0x05
+#define HASH_CELL_ID3 0xB1
+
+#define HASH_SET_BITS(reg_name, mask) \
+ writel((readl(reg_name) | mask), reg_name)
+
+#define HASH_CLEAR_BITS(reg_name, mask) \
+ writel((readl(reg_name) & ~mask), reg_name)
+
+#define HASH_PUT_BITS(reg, val, shift, mask) \
+ writel(((readl(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
+
+#define HASH_SET_DIN(val) writel((val), &device_data->base->din)
+
+#define HASH_INITIALIZE \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ 0x01, HASH_CR_INIT_POS, \
+ HASH_CR_INIT_MASK)
+
+#define HASH_SET_DATA_FORMAT(data_format) \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ (u32) (data_format), HASH_CR_DATAFORM_POS, \
+ HASH_CR_DATAFORM_MASK)
+#define HASH_SET_NBLW(val) \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ (u32) (val), HASH_STR_NBLW_POS, \
+ HASH_STR_NBLW_MASK)
+#define HASH_SET_DCAL \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ 0x01, HASH_STR_DCAL_POS, \
+ HASH_STR_DCAL_MASK)
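+
+/*
+ * Usage illustration (not taken verbatim from the driver): a typical
+ * sequence when starting a new hash operation with the helpers above is
+ *
+ *	HASH_SET_DATA_FORMAT(HASH_DATA_8_BITS);
+ *	HASH_INITIALIZE;
+ *	... write message words to the DIN register ...
+ *	HASH_SET_NBLW(valid_bits_in_last_word);
+ *	HASH_SET_DCAL;
+ *
+ * "valid_bits_in_last_word" is only a placeholder name here. All of the
+ * macros assume a local "device_data" pointer is in scope.
+ */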
+
+/**
+ * struct uint64 - Structure to handle 64-bit integers.
+ * @high_word: Most significant bits.
+ * @low_word: Least significant bits.
+ *
+ * Used to handle 64-bit integers.
+ */
+struct uint64 {
+ u32 high_word;
+ u32 low_word;
+};
+
+/**
+ * struct hash_register - Contains all registers in u8500 hash hardware.
+ * @cr: HASH control register (0x000).
+ * @din: HASH data input register (0x004).
+ * @str: HASH start register (0x008).
+ * @hx: HASH digest register 0..7 (0x00c-0x01C).
+ * @padding0: Reserved (0x02C).
+ * @itcr: Integration test control register (0x080).
+ * @itip: Integration test input register (0x084).
+ * @itop: Integration test output register (0x088).
+ * @padding1: Reserved (0x08C).
+ * @csfull: HASH context full register (0x0F8).
+ * @csdatain: HASH context swap data input register (0x0FC).
+ * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
+ * @padding2: Reserved (0x1D0).
+ * @periphid0: HASH peripheral identification register 0 (0xFE0).
+ * @periphid1: HASH peripheral identification register 1 (0xFE4).
+ * @periphid2: HASH peripheral identification register 2 (0xFE8).
+ * @periphid3: HASH peripheral identification register 3 (0xFEC).
+ * @cellid0: HASH PCell identification register 0 (0xFF0).
+ * @cellid1: HASH PCell identification register 1 (0xFF4).
+ * @cellid2: HASH PCell identification register 2 (0xFF8).
+ * @cellid3: HASH PCell identification register 3 (0xFFC).
+ *
+ * The host communicates with the HASH hardware via 32-bit-wide control
+ * registers accessible over the 32-bit-wide AMBA rev. 2.0 AHB bus. The
+ * structure below lists the registers used.
+ */
+struct hash_register {
+ u32 cr;
+ u32 din;
+ u32 str;
+ u32 hx[8];
+
+ u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
+
+ u32 itcr;
+ u32 itip;
+ u32 itop;
+
+ u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
+
+ u32 csfull;
+ u32 csdatain;
+ u32 csrx[HASH_CSR_COUNT];
+
+ u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
+
+ u32 periphid0;
+ u32 periphid1;
+ u32 periphid2;
+ u32 periphid3;
+
+ u32 cellid0;
+ u32 cellid1;
+ u32 cellid2;
+ u32 cellid3;
+};
+
+/**
+ * struct hash_state - Hash context state.
+ * @temp_cr: Temporary HASH Control Register.
+ * @str_reg: HASH Start Register.
+ * @din_reg: HASH Data Input Register.
+ * @csr[52]: HASH Context Swap Registers 0-51.
+ * @csfull: HASH Context Swap Register holding the status flags.
+ * @csdatain: HASH Context Swap Register holding the input data.
+ * @buffer: Working buffer for messages going to the hardware.
+ * @length: Length of the part of message hashed so far (floor(N/64) * 64).
+ * @index: Valid number of bytes in buffer (N % 64).
+ * @bit_index: Valid number of bits in buffer (N % 8).
+ *
+ * This structure is used between context switches, i.e. when an ongoing job
+ * is interrupted by a new job. When this happens the intermediate results
+ * must be stored in software.
+ *
+ * WARNING: the placement of the "index" member ensures that "buffer" stays
+ * aligned on a 4-byte boundary. This is highly implementation dependent and
+ * MUST be checked whenever this code is ported to new platforms.
+ */
+struct hash_state {
+ u32 temp_cr;
+ u32 str_reg;
+ u32 din_reg;
+ u32 csr[52];
+ u32 csfull;
+ u32 csdatain;
+ u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
+ struct uint64 length;
+ u8 index;
+ u8 bit_index;
+};
+
+/**
+ * enum hash_device_id - HASH device ID.
+ * @HASH_DEVICE_ID_0: Hash hardware with ID 0
+ * @HASH_DEVICE_ID_1: Hash hardware with ID 1
+ */
+enum hash_device_id {
+ HASH_DEVICE_ID_0 = 0,
+ HASH_DEVICE_ID_1 = 1
+};
+
+/**
+ * enum hash_data_format - HASH data format.
+ * @HASH_DATA_32_BITS: 32-bit data format.
+ * @HASH_DATA_16_BITS: 16-bit data format.
+ * @HASH_DATA_8_BITS: 8-bit data format.
+ * @HASH_DATA_1_BIT: 1-bit data format.
+ */
+enum hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+/**
+ * enum hash_algo - Enumeration for selecting between the SHA1 and SHA2 algorithms.
+ * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
+ * @HASH_ALGO_SHA256: Indicates that SHA2 (SHA256) is used.
+ */
+enum hash_algo {
+ HASH_ALGO_SHA1 = 0x0,
+ HASH_ALGO_SHA256 = 0x1
+};
+
+/**
+ * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
+ * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
+ * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
+ */
+enum hash_op {
+ HASH_OPER_MODE_HASH = 0x0,
+ HASH_OPER_MODE_HMAC = 0x1
+};
+
+/**
+ * struct hash_config - Configuration data for the hardware.
+ * @data_format: Format of data entered into the hash data in register.
+ * @algorithm: Algorithm selection bit.
+ * @oper_mode: Operating mode selection bit.
+ */
+struct hash_config {
+ int data_format;
+ int algorithm;
+ int oper_mode;
+};
+
+/**
+ * struct hash_ctx - The context used for hash calculations.
+ * @key: The key used in the operation.
+ * @keylen: The length of the key.
+ * @updated: Indicates if hardware is initialized for new operations.
+ * @state: The state of the current calculations.
+ * @config: The current configuration.
+ * @digestsize: The size of the current digest.
+ * @device: Pointer to the device structure.
+ */
+struct hash_ctx {
+ u8 *key;
+ u32 keylen;
+ u8 updated;
+ struct hash_state state;
+ struct hash_config config;
+ int digestsize;
+ struct hash_device_data *device;
+};
+
+/**
+ * struct hash_device_data - structure for a hash device.
+ * @base: Pointer to the hardware base address.
+ * @list_node: For inclusion in klist.
+ * @dev: Pointer to the device dev structure.
+ * @ctx_lock: Spinlock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_lock: Spinlock for power_state.
+ * @regulator: Pointer to the device's power control.
+ * @clk: Pointer to the device's clock control.
+ * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
+ */
+struct hash_device_data {
+ struct hash_register __iomem *base;
+ struct klist_node list_node;
+ struct device *dev;
+ struct spinlock ctx_lock;
+ struct hash_ctx *current_ctx;
+ bool power_state;
+ struct spinlock power_state_lock;
+ struct ux500_regulator *regulator;
+ struct clk *clk;
+ bool restore_dev_state;
+};
+
+int hash_check_hw(struct hash_device_data *device_data);
+
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config);
+
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
+
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm);
+
+int hash_hw_update(struct ahash_request *req);
+
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *state);
+
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *state);
+
+#endif
diff --git a/drivers/crypto/ux500/hash/hash_alg_p.h b/drivers/crypto/ux500/hash/hash_alg_p.h
new file mode 100755
index 00000000000..c85faaeba6f
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg_p.h
@@ -0,0 +1,26 @@
+/*****************************************************************************/
+/**
+* Copyright (C) ST-Ericsson, 2009 - All rights reserved
+* Reproduction and Communication of this document is strictly prohibited
+* unless specifically authorized in writing by ST-Ericsson
+*
+* Static header file of the HASH processor
+* Specification release related to this implementation: A_V2.2
+* AUTHOR : ST-Ericsson
+*/
+/*****************************************************************************/
+
+#ifndef _HASH_P_H_
+#define _HASH_P_H_
+
+/*--------------------------------------------------------------------------*
+ * Includes *
+ *--------------------------------------------------------------------------*/
+#include "hash_alg.h"
+
+/*--------------------------------------------------------------------------*
+ * Defines *
+ *--------------------------------------------------------------------------*/
+
+#endif /* End _HASH_P_H_ */
+
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 00000000000..08a89eeb601
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,1814 @@
+/*
+ * Cryptographic API.
+ * Support for Nomadik hardware crypto engine.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/klist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/bitops.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <mach/hardware.h>
+
+#include "hash_alg.h"
+
+#define DEV_DBG_NAME "hashX hashX:"
+
+/**
+ * Pre-calculated empty message digests.
+ */
+static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/* HMAC-SHA1, no key */
+static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
+ 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
+ 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
+ 0x70, 0x69, 0x0e, 0x1d
+};
+
+/* HMAC-SHA256, no key */
+static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
+ 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
+ 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
+ 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
+ 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
+};
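
These constants are fixed properties of SHA-1, SHA-256 and their keyless HMAC
variants, so they can be regenerated with the generic software implementations
as a sanity check. A minimal sketch, not part of the driver
(sketch_empty_sha1() and its error handling are illustrative only):

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int sketch_empty_sha1(u8 *out /* SHA1_DIGEST_SIZE bytes */)
    {
        static const u8 empty_msg[1];
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int ret;

        tfm = crypto_alloc_shash("sha1", 0, 0); /* generic sw implementation */
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
            crypto_free_shash(tfm);
            return -ENOMEM;
        }
        desc->tfm = tfm;

        /* Digest of the empty message (""); should match the array above. */
        ret = crypto_shash_digest(desc, empty_msg, 0, out);

        kfree(desc);
        crypto_free_shash(tfm);
        return ret;
    }
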
+
+/**
+ * struct hash_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct hash_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+static struct hash_driver_data driver_data;
+
+/* Declaration of functions */
+/**
+ * hash_messagepad - Pads a message and writes the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message
+ * @index_bytes: The number of bytes in the last message
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes);
+
+/**
+ * release_hash_device - Releases a previously allocated hash device.
+ * @device_data: Structure for the hash device.
+ *
+ */
+static void release_hash_device(struct hash_device_data *device_data)
+{
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx->device = NULL;
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * hash_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+}
+
+/**
+ * get_empty_message_digest - Returns a pre-calculated digest for
+ * the empty message.
+ * @device_data: Structure for the hash device.
+ * @zero_hash: Buffer to return the empty message digest.
+ * @zero_hash_size: Hash size of the empty message digest.
+ * @zero_digest: Set to true if a pre-calculated digest is returned.
+ */
+static int get_empty_message_digest(
+ struct hash_device_data *device_data,
+ u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = device_data->current_ctx;
+ *zero_digest = false;
+
+ /**
+ * Caller responsible for ctx != NULL.
+ */
+
+ if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 ==
+ ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
+ if (!ctx->keylen) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ dev_dbg(device_data->dev, "[%s] Continue hash "
+ "calculation, since hmac key avalable",
+ __func__);
+ }
+ }
+out:
+
+ return ret;
+}
+
+/**
+ * hash_disable_power - Request to disable power and clock.
+ * @device_data: Structure for the hash device.
+ * @save_device_state: If true, saves the current hw state.
+ *
+ * This function requests that power (regulator) and clock be disabled,
+ * and can also save the current hw state.
+ */
+static int hash_disable_power(
+ struct hash_device_data *device_data,
+ bool save_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state)
+ goto out;
+
+ if (save_device_state && device_data->current_ctx) {
+ hash_save_state(device_data,
+ &device_data->current_ctx->state);
+ device_data->restore_dev_state = true;
+ }
+
+ clk_disable(device_data->clk);
+ ret = ux500_regulator_atomic_disable(device_data->regulator);
+ if (ret)
+ dev_err(dev, "[%s] regulator_disable() failed!", __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_enable_power - Request to enable power and clock.
+ * @device_data: Structure for the hash device.
+ * @restore_device_state: If true, restores a previous saved hw state.
+ *
+ * This function requests that power (regulator) and clock be enabled,
+ * and can also restore a previously saved hw state.
+ */
+static int hash_enable_power(
+ struct hash_device_data *device_data,
+ bool restore_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state) {
+ ret = ux500_regulator_atomic_enable(device_data->regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ret = ux500_regulator_atomic_disable(
+ device_data->regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_state) {
+ if (restore_device_state) {
+ device_data->restore_dev_state = false;
+ hash_resume_state(device_data,
+ &device_data->current_ctx->state);
+ }
+ }
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_get_device_data - Checks for an available hash device and return it.
+ * @hash_ctx: Structure for the hash context.
+ * @device_data: Structure for the hash device.
+ *
+ * This function checks for an available hash device and returns it to
+ * the caller.
+ * Note! The caller needs to release the device by calling up().
+ */
+static int hash_get_device_data(struct hash_ctx *ctx,
+ struct hash_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct hash_device_data *local_device_data = NULL;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct hash_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /**
+ * No free device found.
+ * Since a device was reserved with down_interruptible(), this
+ * should not be able to happen.
+ * The number of available devices, which is tracked by
+ * device_allocation, is therefore left decremented by not
+ * doing an up(device_allocation) here.
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
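
Together with release_hash_device() above, this gives the acquire/use/release
pattern that the request handlers follow. A minimal sketch of that pairing
(with_hash_device() is illustrative only; the real callers are
hash_hw_update() and ahash_final() below):

    static int with_hash_device(struct hash_ctx *ctx)
    {
        struct hash_device_data *device_data;
        int ret;

        ret = hash_get_device_data(ctx, &device_data); /* may sleep */
        if (ret)
            return ret;

        /* ... enable power, drive the hardware, disable power ... */

        release_hash_device(device_data); /* ups device_allocation again */
        return 0;
    }
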
+
+/**
+ * hash_hw_write_key - Writes the key to the hardware registers.
+ *
+ * @device_data: Structure for the hash device.
+ * @key: Key to be written.
+ * @keylen: The length of the key.
+ *
+ * Note! This function DOES NOT write to the NBLW registry, even though
+ * specified in the hw design spec. Either due to incorrect info in the
+ * spec or due to a bug in the hw.
+ */
+static void hash_hw_write_key(struct hash_device_data *device_data,
+ const u8 *key, unsigned int keylen)
+{
+ u32 word = 0;
+
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+ while (keylen >= 4) {
+ word = ((u32) (key[3] & 0xff) << 24) |
+ ((u32) (key[2] & 0xff) << 16) |
+ ((u32) (key[1] & 0xff) << 8) |
+ ((u32) (key[0] & 0xff));
+
+ HASH_SET_DIN(word);
+ keylen -= 4;
+ key += 4;
+ }
+
+ /* Take care of the remaining bytes in the last word */
+ if (keylen) {
+ word = 0;
+ while (keylen) {
+ word |= (key[keylen - 1] << (8 * (keylen - 1)));
+ keylen--;
+ }
+ HASH_SET_DIN(word);
+ }
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ HASH_SET_DCAL;
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
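
The main loop above packs key bytes little-endian into 32-bit DIN words and
the tail loop packs any remaining 1-3 bytes into one last word. A worked
sketch of the packing (pack_le32() is illustrative only):

    /* key[0] ends up in the least significant byte of the word. */
    static u32 pack_le32(const u8 *p)
    {
        return ((u32)p[3] << 24) | ((u32)p[2] << 16) |
               ((u32)p[1] << 8) | (u32)p[0];
    }

    /*
     * e.g. a 5-byte key {0x01, 0x02, 0x03, 0x04, 0x05} is written as the
     * word 0x04030201 followed by the tail word 0x00000005.
     */
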
+
+/**
+ * init_hash_hw - Initialise the hash hardware for a new calculation.
+ * @device_data: Structure for the hash device.
+ * @ctx: The hash context.
+ *
+ * This function will enable the bits needed to clear and start a new
+ * calculation.
+ */
+static int init_hash_hw(struct hash_device_data *device_data,
+ struct hash_ctx *ctx)
+{
+ int ret = 0;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32)ctx);
+
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_setconfiguration() "
+ "failed!", __func__);
+ return ret;
+ }
+
+ hash_begin(device_data, ctx);
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ return ret;
+}
+
+/**
+ * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ *
+ * Initialize structures.
+ */
+static int hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
+
+ if (!ctx->key)
+ ctx->keylen = 0;
+
+ memset(&ctx->state, 0, sizeof(struct hash_state));
+ ctx->updated = 0;
+ return 0;
+}
+
+/**
+ * hash_processblock - This function processes a single block of 512 bits (64
+ * bytes), word aligned, starting at message.
+ * @device_data: Structure for the hash device.
+ * @message: Block (512 bits) of message to be written to
+ * the HASH hardware.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_processblock(
+ struct hash_device_data *device_data,
+ const u32 *message)
+{
+ u32 count;
+
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /*
+ * Write message data to the HASH_DIN register.
+ */
+ for (count = 0; count < (HASH_BLOCK_SIZE / sizeof(u32)); count += 4) {
+ HASH_SET_DIN(message[0]);
+ HASH_SET_DIN(message[1]);
+ HASH_SET_DIN(message[2]);
+ HASH_SET_DIN(message[3]);
+ message += 4;
+ }
+}
+
+/**
+ * hash_messagepad - Pads a message and writes the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message.
+ * @index_bytes: The number of bytes in the last message.
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes)
+{
+ dev_dbg(device_data->dev, "[%s] (bytes in final msg=%d))",
+ __func__, index_bytes);
+
+ /*
+ * Clear hash str register, only clear NBLW
+ * since DCAL will be reset by hardware.
+ */
+ writel((readl(&device_data->base->str) & ~HASH_STR_NBLW_MASK),
+ &device_data->base->str);
+
+ /* Main loop */
+ while (index_bytes >= 4) {
+ HASH_SET_DIN(message[0]);
+ index_bytes -= 4;
+ message++;
+ }
+
+ if (index_bytes)
+ HASH_SET_DIN(message[0]);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
+ HASH_SET_NBLW(index_bytes * 8);
+ dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
+ readl(&device_data->base->din),
+ (int)(readl(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+ HASH_SET_DCAL;
+ dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
+ __func__, readl(&device_data->base->din),
+ (int)(readl(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
+
+/**
+ * hash_incrementlength - Increments the length of the current message.
+ * @ctx: Hash context
+ * @incr: Length of message processed already
+ *
+ * Overflow cannot occur, because conditions for overflow are checked in
+ * hash_hw_update.
+ */
+static void hash_incrementlength(struct hash_ctx *ctx, u32 incr)
+{
+ ctx->state.length.low_word += incr;
+
+ /* Check for wrap-around */
+ if (ctx->state.length.low_word < incr)
+ ctx->state.length.high_word++;
+}
+
+/**
+ * hash_setconfiguration - Sets the required configuration for the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @config: Pointer to a configuration structure.
+ *
+ * Reentrancy: Non Re-entrant
+ * Reentrancy issues:
+ * 1. Global variable registry (configuration register,
+ * parameter register, divider register) is being modified
+ *
+ * Comments 1. : User needs to call the hash_begin API after calling this
+ * API, i.e. the current configuration is set only when
+ * bit INIT is set, and we set the INIT bit in hash_begin.
+ * Changing the configuration during a computation has
+ * no effect, so we first set the configuration by calling
+ * this API and then set the INIT bit for the HASH
+ * processor; the current configuration is then taken
+ * into account. As reading the INIT bit (with correct
+ * protection rights) always returns 0b, we can't make a
+ * check at software level. So the user has to initialize
+ * the device for the new configuration to take effect.
+ * 2. The default value of the data format is 0b00, i.e. the
+ * format of data entered in the HASH_DIN register is
+ * 32-bit data. The data written in HASH_DIN is used
+ * directly by the HASH processing, without reordering.
+ */
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config)
+{
+ int ret = 0;
+ dev_dbg(device_data->dev, "[%s] ", __func__);
+
+ if (config->algorithm != HASH_ALGO_SHA1 &&
+ config->algorithm != HASH_ALGO_SHA256)
+ return -EPERM;
+
+ /*
+ * DATAFORM bits. Set the DATAFORM bits according to the configured data
+ * format, which tells the hardware how to interpret the data written to
+ * HASH_DIN.
+ */
+ HASH_SET_DATA_FORMAT(config->data_format);
+
+ /*
+ * Empty message bit. This bit is needed when the hash input data
+ * contains the empty message. Always set in the current implementation,
+ * but with no impact on data other than the empty message.
+ */
+ HASH_SET_BITS(&device_data->base->cr, HASH_CR_EMPTYMSG_MASK);
+
+ /*
+ * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
+ */
+ switch (config->algorithm) {
+ case HASH_ALGO_SHA1:
+ HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ case HASH_ALGO_SHA256:
+ HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ default:
+ dev_err(device_data->dev, "[%s] Incorrect algorithm.",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * MODE bit. This bit selects between HASH or HMAC mode for the
+ * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
+ */
+ if (HASH_OPER_MODE_HASH == config->oper_mode)
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
+ /* Truncate key to blocksize */
+ dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ } else {
+ dev_dbg(device_data->dev, "[%s] LKEY cleared",
+ __func__);
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ }
+ } else { /* Wrong hash mode */
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ }
+ return ret;
+}
+
+/**
+ * hash_begin - This routine resets some globals and initializes the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @ctx: Hash context.
+ *
+ * Reentrancy: Non Re-entrant
+ *
+ * Comments 1. : User needs to call the hash_setconfiguration API before
+ * calling this API, i.e. the current configuration is set
+ * only when bit INIT is set, and we set the INIT bit in
+ * hash_begin. Changing the configuration during a
+ * computation has no effect, so we first set the
+ * configuration by calling hash_setconfiguration and then
+ * set the INIT bit for the HASH processor; the current
+ * configuration is then taken into account. As reading
+ * the INIT bit (with correct protection rights) always
+ * returns 0b, we can't make a check at software level.
+ * So the user has to initialize the device for the new
+ * configuration to take effect.
+ */
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
+{
+ /* HW and SW initializations */
+ /* Note: there is no need to initialize buffer and digest members */
+ dev_dbg(device_data->dev, "[%s] ", __func__);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare the HASH accelerator to compute the message digest of a new
+ * message.
+ */
+ HASH_INITIALIZE;
+
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+}
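
As the comment blocks on hash_setconfiguration() and hash_begin() both
stress, the configuration is only latched when the INIT bit is set. A minimal
sketch of the required ordering, which is what init_hash_hw() earlier in this
file encapsulates (start_new_digest() itself is illustrative only):

    static int start_new_digest(struct hash_device_data *device_data,
                                struct hash_ctx *ctx)
    {
        int ret;

        ret = hash_setconfiguration(device_data, &ctx->config);
        if (ret)
            return ret;

        hash_begin(device_data, ctx); /* sets INIT, latches the config */

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
            hash_hw_write_key(device_data, ctx->key, ctx->keylen);

        return 0;
    }
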
+
+int hash_process_data(
+ struct hash_device_data *device_data,
+ struct hash_ctx *ctx, int msg_length, u8 *data_buffer,
+ u8 *buffer, u8 *index)
+{
+ int ret = 0;
+ u32 count;
+
+ do {
+ if ((*index + msg_length) < HASH_BLOCK_SIZE) {
+ for (count = 0; count < msg_length; count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ *index += msg_length;
+ msg_length = 0;
+ } else {
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data,
+ &ctx->state);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_resume_state()"
+ " failed!", __func__);
+ goto out;
+ }
+
+ } else {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "init_hash_hw()"
+ " failed!", __func__);
+ goto out;
+ }
+ ctx->updated = 1;
+ }
+ /*
+ * If 'data_buffer' is four byte aligned and
+ * local buffer does not have any data, we can
+ * write data directly from 'data_buffer' to
+ * HW peripheral, otherwise we first copy data
+ * to a local buffer
+ */
+ if ((0 == (((u32)data_buffer) % 4))
+ && (0 == *index))
+ hash_processblock(device_data,
+ (const u32 *)
+ data_buffer);
+ else {
+ for (count = 0; count <
+ (u32)(HASH_BLOCK_SIZE -
+ *index);
+ count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ hash_processblock(device_data,
+ (const u32 *)buffer);
+ }
+ hash_incrementlength(ctx, HASH_BLOCK_SIZE);
+ data_buffer += (HASH_BLOCK_SIZE - *index);
+ msg_length -= (HASH_BLOCK_SIZE - *index);
+ *index = 0;
+
+ ret = hash_save_state(device_data,
+ &ctx->state);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_save_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ }
+ } while (msg_length != 0);
+out:
+
+ return ret;
+}
+
+/**
+ * hash_hw_update - Updates current HASH computation hashing another part of
+ * the message.
+ * @req: Byte array containing the message to be hashed (caller
+ * allocated).
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_hw_update(struct ahash_request *req)
+{
+ int ret = 0;
+ u8 index;
+ u8 *buffer;
+ struct hash_device_data *device_data;
+ u8 *data_buffer;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_hash_walk walk;
+ int msg_length = crypto_hash_walk_first(req, &walk);
+
+ pr_debug(DEV_DBG_NAME " [%s] datalength: %d", __func__, msg_length);
+
+ /* Empty message ("") is correct indata */
+ if (msg_length == 0)
+ return ret;
+
+ index = ctx->state.index;
+ buffer = (u8 *)ctx->state.buffer;
+
+ /* Check if ctx->state.length + msg_length
+ overflows */
+ if (msg_length >
+ (ctx->state.length.low_word + msg_length)
+ && HASH_HIGH_WORD_MAX_VAL ==
+ (ctx->state.length.high_word)) {
+ dev_err(device_data->dev, "[%s] HASH_MSG_LENGTH_OVERFLOW!",
+ __func__);
+ return -EPERM;
+ }
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ /* Main loop */
+ while (0 != msg_length) {
+ data_buffer = walk.data;
+ ret = hash_process_data(device_data, ctx,
+ msg_length, data_buffer, buffer, &index);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_internal_hw_"
+ "update() failed!", __func__);
+ goto out_power;
+ }
+
+ msg_length = crypto_hash_walk_done(&walk, 0);
+ }
+
+ ctx->state.index = index;
+
+ dev_dbg(device_data->dev, "[%s] indata length=%d, "
+ "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index);
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "hash_disable_power() failed!", __func__);
+out:
+ release_hash_device(device_data);
+
+ return ret;
+}
+
+/**
+ * hash_resume_state - Function that resumes the state of a calculation.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The state to be restored in the hash hardware
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *device_state)
+{
+ u32 temp_cr;
+ s32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ dev_dbg(device_data->dev, "[%s] (state(0x%x)))",
+ __func__, (u32) device_state);
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /* Check correctness of index and length members */
+ if (device_state->index > HASH_BLOCK_SIZE
+ || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare the HASH accelerator to compute the message digest of a new
+ * message.
+ */
+ HASH_INITIALIZE;
+
+ temp_cr = device_state->temp_cr;
+ writel(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ writel(device_state->csr[count],
+ &device_data->base->csrx[count]);
+ }
+
+ writel(device_state->csfull, &device_data->base->csfull);
+ writel(device_state->csdatain, &device_data->base->csdatain);
+
+ writel(device_state->str_reg, &device_data->base->str);
+ writel(temp_cr, &device_data->base->cr);
+
+ return 0;
+}
+
+/**
+ * hash_save_state - Function that saves the state of hardware.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The structure where the hardware state should be saved.
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *device_state)
+{
+ u32 temp_cr;
+ u32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ dev_dbg(device_data->dev, "[%s] state(0x%x)))",
+ __func__, (u32) device_state);
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * Make sure that there isn't any ongoing calculation in the
+ * hardware before the state registers are read out.
+ */
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ temp_cr = readl(&device_data->base->cr);
+
+ device_state->str_reg = readl(&device_data->base->str);
+
+ device_state->din_reg = readl(&device_data->base->din);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ device_state->csr[count] =
+ readl(&device_data->base->csrx[count]);
+ }
+
+ device_state->csfull = readl(&device_data->base->csfull);
+ device_state->csdatain = readl(&device_data->base->csdatain);
+
+ device_state->temp_cr = temp_cr;
+
+ return 0;
+}
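
hash_save_state() and hash_resume_state() are the two halves of the context
switch described in the struct hash_state kernel-doc: the preempted job's
hardware state is parked in its context and written back before that job
continues. A minimal sketch of the pairing (switch_job() is illustrative
only; the driver does this per block via ctx->updated in hash_process_data()):

    static int switch_job(struct hash_device_data *device_data,
                          struct hash_ctx *old, struct hash_ctx *new)
    {
        int ret;

        ret = hash_save_state(device_data, &old->state);
        if (ret)
            return ret;

        return hash_resume_state(device_data, &new->state);
    }
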
+
+/**
+ * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
+ * @device_data: Structure for the hash device.
+ *
+ */
+int hash_check_hw(struct hash_device_data *device_data)
+{
+ int ret = 0;
+
+ dev_dbg(device_data->dev, "[%s] ", __func__);
+
+ if (NULL == device_data) {
+ ret = -EPERM;
+ pr_err(DEV_DBG_NAME " [%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ goto out;
+ }
+
+ /* Checking Peripheral Ids */
+ if ((HASH_P_ID0 == readl(&device_data->base->periphid0))
+ && (HASH_P_ID1 == readl(&device_data->base->periphid1))
+ && (HASH_P_ID2 == readl(&device_data->base->periphid2))
+ && (HASH_P_ID3 == readl(&device_data->base->periphid3))
+ && (HASH_CELL_ID0 == readl(&device_data->base->cellid0))
+ && (HASH_CELL_ID1 == readl(&device_data->base->cellid1))
+ && (HASH_CELL_ID2 == readl(&device_data->base->cellid2))
+ && (HASH_CELL_ID3 == readl(&device_data->base->cellid3))
+ ) {
+ ret = 0;
+ goto out;
+ } else {
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
+ __func__);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/**
+ * hash_get_digest - Gets the digest.
+ * @device_data: Pointer to the device structure.
+ * @digest: User allocated byte array for the calculated digest.
+ * @algorithm: The algorithm in use.
+ *
+ * Reentrancy: Non Re-entrant, global variable registry (hash control register)
+ * is being modified.
+ *
+ * Note that, if this is called before the final message has been handled,
+ * it will return the intermediate message digest.
+ */
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm)
+{
+ u32 temp_hx_val, count;
+ int loop_ctr;
+
+ if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
+ dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
+ __func__, algorithm);
+ return;
+ }
+
+ if (algorithm == HASH_ALGO_SHA1)
+ loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
+ else
+ loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
+
+ dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
+ __func__, (u32) digest);
+
+ /* Copy result into digest array */
+ for (count = 0; count < loop_ctr; count++) {
+ temp_hx_val = readl(&device_data->base->hx[count]);
+ digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
+ digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
+ digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
+ digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
+ }
+}
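
Each HX word is stored most-significant byte first, so the resulting digest
is the usual big-endian byte string. A sketch of the per-word unpacking done
by the loop above (unpack_be32() is illustrative only):

    static void unpack_be32(u32 hx, u8 out[4])
    {
        out[0] = (hx >> 24) & 0xFF;
        out[1] = (hx >> 16) & 0xFF;
        out[2] = (hx >> 8) & 0xFF;
        out[3] = hx & 0xFF;
    }
    /* e.g. hx == 0xda39a3ee gives out[] = { 0xda, 0x39, 0xa3, 0xee } */
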
+
+/**
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_update(struct ahash_request *req)
+{
+ int ret = 0;
+
+ pr_debug(DEV_DBG_NAME " [%s] ", __func__);
+
+ ret = hash_hw_update(req);
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
+ __func__);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ pr_debug(DEV_DBG_NAME " [%s] ", __func__);
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen == 0) {
+ u8 zero_hash[SHA256_DIGEST_SIZE];
+ u32 zero_hash_size = 0;
+ bool zero_digest = false;
+ /**
+ * Use a pre-calculated empty message digest
+ * (workaround since the hw returns zeroes, hw bug!?)
+ */
+ ret = get_empty_message_digest(device_data, &zero_hash[0],
+ &zero_hash_size, &zero_digest);
+ if (!ret && likely(zero_hash_size == ctx->digestsize) &&
+ zero_digest) {
+ memcpy(req->result, &zero_hash[0], ctx->digestsize);
+ goto out_power;
+ } else if (!ret && !zero_digest) {
+ dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
+ "key, continue...", __func__);
+ } else {
+ dev_err(device_data->dev, "[%s] ret=%d, or wrong "
+ "digest size? %s", __func__, ret,
+ (zero_hash_size == ctx->digestsize) ?
+ "true" : "false");
+ /* Return error */
+ goto out_power;
+ }
+ }
+
+ if (!ctx->updated) {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] init_hash_hw() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ }
+
+ if (ctx->state.index) {
+ hash_messagepad(device_data, ctx->state.buffer,
+ ctx->state.index);
+ } else {
+ HASH_SET_DCAL;
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+ }
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /**
+ * Allocated in setkey, and only used in HMAC.
+ */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+static int hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen, int alg)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s] keylen: %d", __func__, keylen);
+
+ /**
+ * Freed in final.
+ */
+ ctx->key = kmalloc(keylen, GFP_KERNEL);
+ if (!ctx->key) {
+ pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
+ "for %d\n", __func__, alg);
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return ret;
+}
+
+static int ahash_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret1 = ahash_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int ahash_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret1 = ahash_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]: (ctx=0x%x)!", __func__, (u32) ctx);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret1 = hmac_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret1 = hmac_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
+}
+
+static int hmac_sha256_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
+}
+
+static struct ahash_alg ahash_sha1_alg = {
+ .init = ahash_sha1_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha1_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg ahash_sha256_alg = {
+ .init = ahash_sha256_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha256_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg hmac_sha1_alg = {
+ .init = hmac_sha1_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha1_digest,
+ .setkey = hmac_sha1_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg hmac_sha256_alg = {
+ .init = hmac_sha256_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha256_digest,
+ .setkey = hmac_sha256_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/**
+ * ux500_ahash_algs[] - The hash and HMAC algorithms exported by this driver.
+ */
+static struct ahash_alg *ux500_ahash_algs[] = {
+ &ahash_sha1_alg,
+ &ahash_sha256_alg,
+ &hmac_sha1_alg,
+ &hmac_sha256_alg
+};
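
Once registered, these transforms are reached through the normal asynchronous
hash API by algorithm name ("sha1", "sha256", "hmac(sha1)", "hmac(sha256)").
A minimal caller sketch, not part of the driver (sketch_sha1_digest() is
illustrative; a real caller must also handle -EINPROGRESS/-EBUSY from truly
asynchronous implementations):

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int sketch_sha1_digest(const u8 *data, unsigned int len, u8 *out)
    {
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_ahash("sha1", 0, 0); /* may select sha1-ux500 */
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
            crypto_free_ahash(tfm);
            return -ENOMEM;
        }

        sg_init_one(&sg, data, len); /* data must be linearly mapped memory */
        ahash_request_set_callback(req, 0, NULL, NULL);
        ahash_request_set_crypt(req, &sg, out, len);

        ret = crypto_ahash_digest(req);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return ret;
    }
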
+
+/**
+ * ahash_algs_register_all - Registers all algorithms in ux500_ahash_algs.
+ */
+static int ahash_algs_register_all(struct hash_device_data *device_data)
+{
+ int ret;
+ int i;
+ int count;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) {
+ ret = crypto_register_ahash(ux500_ahash_algs[i]);
+ if (ret) {
+ count = i;
+ dev_err(device_data->dev, "[%s] alg registration"
+ " failed",
+ ux500_ahash_algs[i]->halg.base.cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_ahash(ux500_ahash_algs[i]);
+ return ret;
+}
+
+/**
+ * ahash_algs_unregister_all - Unregisters all algorithms in ux500_ahash_algs.
+ */
+static void ahash_algs_unregister_all(struct hash_device_data *device_data)
+{
+ int i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++)
+ crypto_unregister_ahash(ux500_ahash_algs[i]);
+}
+
+/**
+ * ux500_hash_probe - Function that probes the hash hardware.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev);
+ device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s] ioremap() failed!",
+ __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_lock);
+
+ /* Enable power for HASH1 hardware block */
+ device_data->regulator = ux500_regulator_get(dev);
+
+ if (IS_ERR(device_data->regulator)) {
+ dev_err(dev, "[%s] regulator_get() failed!", __func__);
+ ret = PTR_ERR(device_data->regulator);
+ device_data->regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clock for HASH1 hardware block */
+ device_data->clk = clk_get(dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s] clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ ret = hash_check_hw(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
+ goto out_power;
+ }
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ ret = ahash_algs_register_all(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] ahash_algs_register_all() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed!", __func__);
+
+ dev_info(dev, "[%s] successfully probed\n", __func__);
+ return 0;
+
+out_power:
+ hash_disable_power(device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ ux500_regulator_put(device_data->regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+/**
+ * ux500_hash_remove - Function that removes the hash device from the platform.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "[%s] (pdev=0x%x)", __func__, (u32) pdev);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ ux500_regulator_put(device_data->regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(device_data);
+
+ return 0;
+}
+
+/**
+ * ux500_hash_shutdown - Function that shutdown the hash device.
+ * @pdev: The platform device
+ */
+static void ux500_hash_shutdown(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use!"
+ "Shutting down anyway...", __func__);
+ /**
+ * (Allocate the device)
+ * Need to set this to a non-NULL (dummy) value
+ * to avoid usage during context switching.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ if (hash_disable_power(device_data, false))
+ dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
+ __func__);
+}
+
+/**
+ * ux500_hash_suspend - Function that suspends the hash device.
+ * @pdev: The platform device.
+ * @state: The power management message (unused).
+ */
+static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = hash_disable_power(device_data, false);
+
+ } else
+ ret = hash_disable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);
+
+ return ret;
+}
+
+/**
+ * ux500_hash_resume - Function that resumes the hash device.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = hash_enable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
+ __func__);
+
+ return ret;
+}
+
+static struct platform_driver hash_driver = {
+ .probe = ux500_hash_probe,
+ .remove = ux500_hash_remove,
+ .shutdown = ux500_hash_shutdown,
+ .suspend = ux500_hash_suspend,
+ .resume = ux500_hash_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hash1",
+ }
+};
+
+/**
+ * ux500_hash_mod_init - The kernel module init function.
+ */
+static int __init ux500_hash_mod_init(void)
+{
+ pr_debug(DEV_DBG_NAME " [%s] is called!", __func__);
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+
+ return platform_driver_register(&hash_driver);
+}
+
+/**
+ * ux500_hash_mod_fini - The kernel module exit function.
+ */
+static void __exit ux500_hash_mod_fini(void)
+{
+ pr_debug(DEV_DBG_NAME " [%s] is called!", __func__);
+
+ platform_driver_unregister(&hash_driver);
+ return;
+}
+
+module_init(ux500_hash_mod_init);
+module_exit(ux500_hash_mod_fini);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
+MODULE_ALIAS("hmac-sha1-all");
+MODULE_ALIAS("hmac-sha256-all");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259cad0ce..110e74bf1d7 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
@@ -32,6 +34,9 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY 100
+
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
@@ -62,6 +67,55 @@ enum d40_command {
D40_DMA_SUSPENDED = 3
};
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+ D40_DREG_LCPA,
+ D40_DREG_LCLA,
+ D40_DREG_PRMSE,
+ D40_DREG_PRMSO,
+ D40_DREG_PRMOE,
+ D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+ D40_DREG_PSEG1,
+ D40_DREG_PSEG2,
+ D40_DREG_PSEG3,
+ D40_DREG_PSEG4,
+ D40_DREG_PCEG1,
+ D40_DREG_PCEG2,
+ D40_DREG_PCEG3,
+ D40_DREG_PCEG4,
+ D40_DREG_RSEG1,
+ D40_DREG_RSEG2,
+ D40_DREG_RSEG3,
+ D40_DREG_RSEG4,
+ D40_DREG_RCEG1,
+ D40_DREG_RCEG2,
+ D40_DREG_RCEG3,
+ D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+ D40_CHAN_REG_SSCFG,
+ D40_CHAN_REG_SSELT,
+ D40_CHAN_REG_SSPTR,
+ D40_CHAN_REG_SSLNK,
+ D40_CHAN_REG_SDCFG,
+ D40_CHAN_REG_SDELT,
+ D40_CHAN_REG_SDPTR,
+ D40_CHAN_REG_SDLNK,
+};
+
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
@@ -96,7 +150,7 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
*
* This descriptor is used for both logical and physical transfers.
*/
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
* channels.
*
* @lock: A lock protection this entity.
+ * @reserved: True if used by secure world or otherwise.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
*/
struct d40_phy_res {
spinlock_t lock;
+ bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
@@ -241,6 +296,7 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
+ struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
+ u32 reg_val_backup[BACKUP_REGS_SZ];
+ u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 *reg_val_backup_chan;
+ u16 gcc_pwr_off_mask;
+ bool initialized;
};
/**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
struct d40_desc *d;
struct d40_desc *_d;
- list_for_each_entry_safe(d, _d, &d40c->client, node)
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
if (async_tx_test_ack(&d->txd)) {
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
break;
}
+ }
}
if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
bool cyclic = desc->cyclic;
int curr_lcla = -EINVAL;
int first_lcla = 0;
+ bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
bool linkback;
/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
&lli->src[lli_current],
next_lcla, flags);
- dma_sync_single_range_for_device(chan->base->dev,
- pool->dma_addr, lcla_offset,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
-
+ /*
+ * Cache maintenance is not needed if lcla is
+ * mapped in esram
+ */
+ if (!use_esram_lcla) {
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ }
curr_lcla = next_lcla;
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->rev >= 3)
+ dma40_backup(base->virtbase, base->reg_val_backup_v3,
+ d40_backup_regs_v3,
+ ARRAY_SIZE(d40_backup_regs_v3),
+ save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -1013,6 +1144,7 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
+ pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1157,8 @@ static int d40_pause(struct d40_chan *d40c)
D40_DMA_RUN);
}
}
-
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1039,7 +1172,7 @@ static int d40_resume(struct d40_chan *d40c)
return 0;
spin_lock_irqsave(&d40c->lock, flags);
-
+ pm_runtime_get_sync(d40c->base->dev);
if (d40c->base->rev == 0)
if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
@@ -1057,6 +1190,8 @@ static int d40_resume(struct d40_chan *d40c)
}
no_suspend:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1129,7 +1264,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- d40c->busy = true;
+ if (!d40c->busy)
+ d40c->busy = true;
+
+ pm_runtime_get_sync(d40c->base->dev);
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1190,6 +1328,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40_queue_start(d40c) == NULL)
d40c->busy = false;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
}
d40c->pending_tx++;
@@ -1405,11 +1545,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
return res;
}
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
- int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+ bool is_src, int log_event_line, bool is_log,
+ bool *first_user)
{
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
+
+ *first_user = ((phy->allocated_src | phy->allocated_dst)
+ == D40_ALLOC_FREE);
+
if (!is_log) {
/* Physical interrupts are masked per physical full channel */
if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1635,7 @@ out:
return is_free;
}
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
int dev_type;
int event_group;
@@ -1526,7 +1671,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
for (i = 0; i < d40c->base->num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log))
+ 0, is_log,
+ first_phy_user))
goto found_phy;
}
} else
@@ -1536,7 +1682,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
if (d40_alloc_mask_set(&phys[i],
is_src,
0,
- is_log))
+ is_log,
+ first_phy_user))
goto found_phy;
}
}
@@ -1552,6 +1699,25 @@ found_phy:
/* Find logical channel */
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
+
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
+
+ if ((i != phy_num) && (i != phy_num + 1)) {
+ dev_err(chan2dev(d40c),
+ "invalid fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
+ if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+ is_log, first_phy_user))
+ goto found_log;
+
+ dev_err(chan2dev(d40c),
+ "could not allocate fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
/*
* Spread logical channels across all available physical rather
* than pack every logical channel at the first available phy
@@ -1560,13 +1726,15 @@ found_phy:
if (is_src) {
for (i = phy_num; i < phy_num + 2; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
} else {
for (i = phy_num + 1; i >= phy_num; i--) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
}
@@ -1643,10 +1811,11 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
chan_err(d40c, "suspend failed\n");
- return res;
+ goto out;
}
if (chan_is_logical(d40c)) {
@@ -1664,13 +1833,11 @@ static int d40_free_dma(struct d40_chan *d40c)
if (d40_chan_has_events(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
- if (res) {
+ if (res)
chan_err(d40c,
"Executing RUN command\n");
- return res;
- }
}
- return 0;
+ goto out;
}
} else {
(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1847,23 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "Failed to stop channel\n");
- return res;
+ goto out;
}
+
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+
+ d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
- return 0;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
@@ -2011,14 +2188,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
goto fail;
}
}
- is_free_phy = (d40c->phy_chan == NULL);
- err = d40_allocate_channel(d40c);
+ err = d40_allocate_channel(d40c, &is_free_phy);
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
+ d40c->configured = false;
goto fail;
}
+ pm_runtime_get_sync(d40c->base->dev);
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
&d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2216,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
+ dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+ chan_is_logical(d40c) ? "logical" : "physical",
+ d40c->phy_chan->num,
+ d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
/*
* Only write channel configuration to the DMA if the physical
* resource is free. In case of multiple logical channels
@@ -2046,6 +2230,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (is_free_phy)
d40_config_write(d40c);
fail:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
}
@@ -2519,6 +2705,72 @@ failure1:
return err;
}
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+ if (!pm_runtime_suspended(dev))
+ return -EBUSY;
+
+ if (base->lcpa_regulator)
+ ret = regulator_disable(base->lcpa_regulator);
+ return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ d40_save_restore_registers(base, true);
+
+ /* Don't disable/enable clocks for v1 due to HW bugs */
+ if (base->rev != 1)
+ writel_relaxed(base->gcc_pwr_off_mask,
+ base->virtbase + D40_DREG_GCC);
+
+ return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ if (base->initialized)
+ d40_save_restore_registers(base, false);
+
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+ base->virtbase + D40_DREG_GCC);
+ return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator)
+ ret = regulator_enable(base->lcpa_regulator);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+ .suspend = dma40_pm_suspend,
+ .runtime_suspend = dma40_runtime_suspend,
+ .runtime_resume = dma40_runtime_resume,
+ .resume = dma40_resume,
+};
+#define DMA40_PM_OPS (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS NULL
+#endif
+
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2779,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
int num_phy_chans_avail = 0;
u32 val[2];
int odd_even_bit = -2;
+ int gcc = D40_DREG_GCC_ENA;
val[0] = readl(base->virtbase + D40_DREG_PRSME);
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2791,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark security only channels as occupied */
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[i].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_DST);
+
+
} else {
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ base->phy_res[i].reserved = false;
num_phy_chans_avail++;
}
spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2813,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[chan].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_DST);
num_phy_chans_avail--;
}
@@ -2572,6 +2838,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
val[0] = val[0] >> 2;
}
+ /*
+ * To keep things simple, enable all clocks initially.
+ * The clocks will be managed later, after channel allocation.
+ * The clocks for the event lines on which reserved channels exist
+ * are not managed here.
+ */
+ writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+ base->gcc_pwr_off_mask = gcc;
+
return num_phy_chans_avail;
}
@@ -2699,10 +2974,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
- sizeof(struct d40_desc *) *
- D40_LCLA_LINK_PER_EVENT_GRP,
+ base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+ sizeof(d40_backup_regs_chan),
GFP_KERNEL);
+ if (!base->reg_val_backup_chan)
+ goto failure;
+
+ base->lcla_pool.alloc_map =
+ kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+ * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2741,9 +3021,9 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static const struct d40_reg_val dma_init_reg[] = {
+ static struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3223,31 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
+ /* If lcla has to be located in ESRAM we don't need to allocate */
+ if (base->plat_data->use_esram_lcla) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lcla_esram");
+ if (!res) {
+ ret = -ENOENT;
+ d40_err(&pdev->dev,
+ "No \"lcla_esram\" memory resource\n");
+ goto failure;
+ }
+ base->lcla_pool.base = ioremap(res->start,
+ resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+ goto failure;
+ }
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
- ret = d40_lcla_allocate(base);
- if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
+ } else {
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ goto failure;
+ }
}
spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3260,32 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_enable(base->dev);
+ pm_runtime_resume(base->dev);
+
+ if (base->plat_data->use_esram_lcla) {
+
+ base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+ if (IS_ERR(base->lcpa_regulator)) {
+ d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret) {
+ d40_err(&pdev->dev,
+ "Failed to enable lcpa_regulator\n");
+ regulator_put(base->lcpa_regulator);
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+ }
+
+ base->initialized = true;
err = d40_dmaengine_init(base, num_reserved_chans);
if (err)
goto failure;
@@ -2976,6 +3302,11 @@ failure:
if (base->virtbase)
iounmap(base->virtbase);
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
+
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -2998,6 +3329,11 @@ failure:
clk_put(base->clk);
}
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
+ }
+
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
@@ -3013,6 +3349,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
+ .pm = DMA40_PM_OPS,
},
};
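Note on the runtime PM changes above: every path that touches the DMA hardware now brackets the access with pm_runtime_get_sync() and pm_runtime_put_autosuspend(), with an autosuspend delay set at probe time. The sketch below is illustrative only (a generic device, not this driver); the function name and error handling are assumptions.

	/* Illustrative only: the runtime-PM-with-autosuspend idiom used above. */
	#include <linux/pm_runtime.h>

	static int example_do_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);		/* power/clock the block up */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... touch hardware registers here ... */

		pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);	/* may power down after the delay */
		return 0;
	}
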
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index cad9e1daedf..d47d1fa36b9 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -102,16 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
} else {
/* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
@@ -331,10 +333,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcpa[0].lcsp0);
- writel(lli_src->lcsp13, &lcpa[0].lcsp1);
- writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
- writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+ writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
@@ -344,10 +346,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcla[0].lcsp02);
- writel(lli_src->lcsp13, &lcla[0].lcsp13);
- writel(lli_dst->lcsp02, &lcla[1].lcsp02);
- writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+ writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}
static void d40_log_fill_lli(struct d40_log_lli *lli,
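The lcpa/lcla writes above switch from writel() to writel_relaxed(). On ARM, writel() performs a write barrier before the store while the _relaxed variant does not. A minimal sketch of the general pattern when relaxed writes are batched and one ordered access is still wanted; the register offsets and function name are made up, not from this driver.

	/* Illustrative only: batch descriptor words with relaxed writes, then let
	 * a final writel() order them before the "doorbell" register write. */
	#include <linux/io.h>

	static void example_push_descriptor(void __iomem *base, u32 lo, u32 hi)
	{
		writel_relaxed(lo, base + 0x00);	/* no barrier, cheaper on ARM */
		writel_relaxed(hi, base + 0x04);
		writel(1, base + 0x08);			/* barrier before this store orders
							 * the relaxed writes above */
	}
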
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c455158d..8d3d490968a 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
/* Most bits of the CFG register are the same in log as in phy mode */
#define D40_SREG_CFG_MST_POS 15
@@ -123,6 +125,15 @@
/* DMA Register Offsets */
#define D40_DREG_GCC 0x000
+#define D40_DREG_GCC_ENA 0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+ (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
#define D40_DREG_PRTYP 0x004
#define D40_DREG_PRSME 0x008
#define D40_DREG_PRSMO 0x00C
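For reference, the new GCC helpers expand as follows for an illustrative physical channel number (5 is just an example):

	/* Worked example, illustrative values only. */
	/* D40_PHYS_TO_GROUP(5)                               = (5 & 7) / 2          = 2      */
	/* D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_SRC)       = 1 << (8 + 2 * 2 + 0) = 0x1000 */
	/* D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_DST)       = 1 << (8 + 2 * 2 + 1) = 0x2000 */
	/* D40_DREG_GCC_ENABLE_ALL = 0xff01: bits 8..15 (src and dst enables for all
	 * four event groups) plus bit 0 (D40_DREG_GCC_ENA). */
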
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8482a23887d..892f8e75c26 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -479,7 +479,7 @@ config GPIO_JANZ_TTL
config GPIO_AB8500
bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE && BROKEN
+ depends on AB8500_CORE
help
Select this to enable the AB8500 IC GPIO driver
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index 050c05d9189..a0253ad14df 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -60,7 +60,7 @@
#define AB8500_GPIO_IN4_REG 0x43
#define AB8500_GPIO_IN5_REG 0x44
#define AB8500_GPIO_IN6_REG 0x45
-#define AB8500_GPIO_ALTFUN_REG 0x45
+#define AB8500_GPIO_ALTFUN_REG 0x50
#define ALTFUN_REG_INDEX 6
#define AB8500_NUM_GPIO 42
#define AB8500_NUM_VIR_GPIO_IRQ 16
@@ -115,7 +115,7 @@ static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
u8 mask = 1 << (offset % 8);
- u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
+ u8 reg = AB8500_GPIO_IN1_REG + (offset / 8);
int ret;
u8 data;
ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
@@ -132,7 +132,7 @@ static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
int ret;
/* Write the data */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
+ ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
if (ret < 0)
dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
}
@@ -174,9 +174,9 @@ static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
int start;
int end;
} clusters[] = {
- {.start = 6, .end = 13},
- {.start = 24, .end = 25},
- {.start = 36, .end = 41},
+ {.start = 5, .end = 12}, /* GPIO numbers start from 1 */
+ {.start = 23, .end = 24},
+ {.start = 35, .end = 40},
};
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
int base = ab8500_gpio->irq_base;
@@ -207,7 +207,7 @@ static struct gpio_chip ab8500gpio_chip = {
static unsigned int irq_to_rising(unsigned int irq)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq);
int offset = irq - ab8500_gpio->irq_base;
int new_irq = offset + AB8500_INT_GPIO6R
+ ab8500_gpio->parent->irq_base;
@@ -216,7 +216,7 @@ static unsigned int irq_to_rising(unsigned int irq)
static unsigned int irq_to_falling(unsigned int irq)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq);
int offset = irq - ab8500_gpio->irq_base;
int new_irq = offset + AB8500_INT_GPIO6F
+ ab8500_gpio->parent->irq_base;
@@ -261,15 +261,16 @@ static irqreturn_t handle_falling(int irq, void *dev)
return IRQ_HANDLED;
}
-static void ab8500_gpio_irq_lock(unsigned int irq)
+static void ab8500_gpio_irq_lock(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500_gpio->lock);
}
-static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
+static void ab8500_gpio_irq_sync_unlock(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq;
int offset = irq - ab8500_gpio->irq_base;
bool rising = ab8500_gpio->rising & BIT(offset);
bool falling = ab8500_gpio->falling & BIT(offset);
@@ -316,21 +317,22 @@ static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
}
-static void ab8500_gpio_irq_mask(unsigned int irq)
+static void ab8500_gpio_irq_mask(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = MASK;
}
-static void ab8500_gpio_irq_unmask(unsigned int irq)
+static void ab8500_gpio_irq_unmask(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = UNMASK;
}
-static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int ab8500_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq;
int offset = irq - ab8500_gpio->irq_base;
if (type == IRQ_TYPE_EDGE_BOTH) {
@@ -344,28 +346,28 @@ static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
return 0;
}
-unsigned int ab8500_gpio_irq_startup(unsigned int irq)
+unsigned int ab8500_gpio_irq_startup(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = STARTUP;
return 0;
}
-void ab8500_gpio_irq_shutdown(unsigned int irq)
+void ab8500_gpio_irq_shutdown(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = SHUTDOWN;
}
static struct irq_chip ab8500_gpio_irq_chip = {
.name = "ab8500-gpio",
- .startup = ab8500_gpio_irq_startup,
- .shutdown = ab8500_gpio_irq_shutdown,
- .bus_lock = ab8500_gpio_irq_lock,
- .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
- .mask = ab8500_gpio_irq_mask,
- .unmask = ab8500_gpio_irq_unmask,
- .set_type = ab8500_gpio_irq_set_type,
+ .irq_startup = ab8500_gpio_irq_startup,
+ .irq_shutdown = ab8500_gpio_irq_shutdown,
+ .irq_bus_lock = ab8500_gpio_irq_lock,
+ .irq_bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
+ .irq_mask = ab8500_gpio_irq_mask,
+ .irq_unmask = ab8500_gpio_irq_unmask,
+ .irq_set_type = ab8500_gpio_irq_set_type,
};
static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
@@ -374,14 +376,14 @@ static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
int irq;
for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
- set_irq_chip_data(irq, ab8500_gpio);
- set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
+ irq_set_chip_data(irq, ab8500_gpio);
+ irq_set_chip_and_handler(irq, &ab8500_gpio_irq_chip,
handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -397,8 +399,8 @@ static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
@@ -443,6 +445,18 @@ static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
pdata->config_reg[i]);
if (ret < 0)
goto out_free;
+
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i + AB8500_GPIO_DIR1_REG,
+ pdata->config_direction[i]);
+ if (ret < 0)
+ goto out_free;
+
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i + AB8500_GPIO_PUD1_REG,
+ pdata->config_pullups[i]);
+ if (ret < 0)
+ goto out_free;
}
ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
AB8500_GPIO_ALTFUN_REG,
@@ -493,6 +507,86 @@ static int __devexit ab8500_gpio_remove(struct platform_device *pdev)
return 0;
}
+int ab8500_config_pulldown(struct device *dev,
+ enum ab8500_pin gpio, bool enable)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 pos = offset % 8;
+ u8 val = enable ? 0 : 1;
+ u8 reg = AB8500_GPIO_PUD1_REG + (offset / 8);
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(dev, "%s write failed\n", __func__);
+ return ret;
+}
+EXPORT_SYMBOL(ab8500_config_pulldown);
+
+/*
+ * ab8500_gpio_config_select()
+ *
+ * Configure functionality of pin, either specific use or GPIO.
+ * @dev: device pointer
+ * @gpio: gpio number
+ * @gpio_select: true if the pin should be used as GPIO
+ */
+int ab8500_gpio_config_select(struct device *dev,
+ enum ab8500_pin gpio, bool gpio_select)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8);
+ u8 pos = offset % 8;
+ u8 val = gpio_select ? 1 : 0;
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(dev, "%s write failed\n", __func__);
+
+ dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ __func__, AB8500_MISC, reg, 1 << pos, val << pos);
+
+ return ret;
+}
+
+/*
+ * ab8500_gpio_config_get_select()
+ *
+ * Read currently configured functionality, either specific use or GPIO.
+ * @dev: device pointer
+ * @gpio: gpio number
+ * @gpio_select: pointer to pin selection status
+ */
+int ab8500_gpio_config_get_select(struct device *dev,
+ enum ab8500_pin gpio, bool *gpio_select)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8);
+ u8 pos = offset % 8;
+ u8 val;
+ int ret;
+
+ ret = abx500_get_register_interruptible(dev,
+ AB8500_MISC, reg, &val);
+ if (ret < 0) {
+ dev_err(dev, "%s read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & (1 << pos))
+ *gpio_select = true;
+ else
+ *gpio_select = false;
+
+ dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ __func__, AB8500_MISC, reg, 1 << pos, val);
+
+ return 0;
+}
+
static struct platform_driver ab8500_gpio_driver = {
.driver = {
.name = "ab8500-gpio",
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 1ebedfb6d46..7a4fd81a195 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -23,11 +23,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/gpio/nomadik.h>
#include <asm/mach/irq.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
@@ -58,8 +58,11 @@ struct nmk_gpio_chip {
u32 real_wake;
u32 rwimsc;
u32 fwimsc;
+ u32 rimsc;
+ u32 fimsc;
u32 slpm;
u32 pull_up;
+ u32 lowemi;
};
static struct nmk_gpio_chip *
@@ -124,6 +127,24 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
}
}
+static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, bool lowemi)
+{
+ u32 bit = BIT(offset);
+ bool enabled = nmk_chip->lowemi & bit;
+
+ if (lowemi == enabled)
+ return;
+
+ if (lowemi)
+ nmk_chip->lowemi |= bit;
+ else
+ nmk_chip->lowemi &= ~bit;
+
+ writel_relaxed(nmk_chip->lowemi,
+ nmk_chip->addr + NMK_GPIO_LOWEMI);
+}
+
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
unsigned offset)
{
@@ -150,8 +171,8 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int gpio_mode,
bool glitch)
{
- u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+ u32 rwimsc = nmk_chip->rwimsc;
+ u32 fwimsc = nmk_chip->fwimsc;
if (glitch && nmk_chip->set_ioforce) {
u32 bit = BIT(offset);
@@ -173,6 +194,36 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
}
}
+static void
+nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
+{
+ u32 falling = nmk_chip->fimsc & BIT(offset);
+ u32 rising = nmk_chip->rimsc & BIT(offset);
+ int gpio = nmk_chip->chip.base + offset;
+ int irq = NOMADIK_GPIO_TO_IRQ(gpio);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ if (!rising && !falling)
+ return;
+
+ if (!d || !irqd_irq_disabled(d))
+ return;
+
+ if (rising) {
+ nmk_chip->rimsc &= ~BIT(offset);
+ writel_relaxed(nmk_chip->rimsc,
+ nmk_chip->addr + NMK_GPIO_RIMSC);
+ }
+
+ if (falling) {
+ nmk_chip->fimsc &= ~BIT(offset);
+ writel_relaxed(nmk_chip->fimsc,
+ nmk_chip->addr + NMK_GPIO_FIMSC);
+ }
+
+ dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio);
+}
+
static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
{
@@ -238,6 +289,17 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
__nmk_gpio_set_pull(nmk_chip, offset, pull);
}
+ __nmk_gpio_set_lowemi(nmk_chip, offset, PIN_LOWEMI(cfg));
+
+ /*
+ * If the pin is switching to altfunc, and there was an interrupt
+ * installed on it which has been lazily disabled, actually mask the
+ * interrupt to prevent spurious interrupts that would occur while the
+ * pin is under control of the peripheral. Only SKE does this.
+ */
+ if (af != NMK_GPIO_ALT_GPIO)
+ nmk_gpio_disable_lazy_irq(nmk_chip, offset);
+
/*
* If we've backed up the SLPM registers (glitch workaround), modify
* the backups since they will be restored.
@@ -359,7 +421,7 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
/**
* nmk_config_pin - configure a pin's mux attributes
* @cfg: pin configuration
- *
+ * @sleep: Non-zero to apply the sleep mode configuration
* Configures a pin's mode (alternate function or GPIO), its pull up status,
* and its sleep mode based on the specified configuration. The @cfg is
* usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
@@ -556,27 +618,38 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
int gpio, enum nmk_gpio_irq_type which,
bool enable)
{
- u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC;
- u32 fimsc = which == WAKE ? NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC;
u32 bitmask = nmk_gpio_get_bitmask(gpio);
- u32 reg;
+ u32 *rimscval;
+ u32 *fimscval;
+ u32 rimscreg;
+ u32 fimscreg;
+
+ if (which == NORMAL) {
+ rimscreg = NMK_GPIO_RIMSC;
+ fimscreg = NMK_GPIO_FIMSC;
+ rimscval = &nmk_chip->rimsc;
+ fimscval = &nmk_chip->fimsc;
+ } else {
+ rimscreg = NMK_GPIO_RWIMSC;
+ fimscreg = NMK_GPIO_FWIMSC;
+ rimscval = &nmk_chip->rwimsc;
+ fimscval = &nmk_chip->fwimsc;
+ }
/* we must individually set/clear the two edges */
if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + rimsc);
if (enable)
- reg |= bitmask;
+ *rimscval |= bitmask;
else
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + rimsc);
+ *rimscval &= ~bitmask;
+ writel(*rimscval, nmk_chip->addr + rimscreg);
}
if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + fimsc);
if (enable)
- reg |= bitmask;
+ *fimscval |= bitmask;
else
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + fimsc);
+ *fimscval &= ~bitmask;
+ writel(*fimscval, nmk_chip->addr + fimscreg);
}
}
@@ -1008,9 +1081,6 @@ void nmk_gpio_wakeups_suspend(void)
clk_enable(chip->clk);
- chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
- chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
-
writel(chip->rwimsc & chip->real_wake,
chip->addr + NMK_GPIO_RWIMSC);
writel(chip->fwimsc & chip->real_wake,
@@ -1139,6 +1209,10 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
chip->dev = &dev->dev;
chip->owner = THIS_MODULE;
+ clk_enable(nmk_chip->clk);
+ nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
+ clk_disable(nmk_chip->clk);
+
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
goto out_free;
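The gpio-nomadik changes above keep the rising/falling interrupt mask registers (RIMSC/FIMSC and their wakeup variants) in shadow fields and write the cached value out, rather than doing a read-modify-write on the hardware each time. A minimal sketch of that shadow-register idiom; the struct, function name and register offset are made up.

	/* Illustrative shadow-register idiom (names and offset are made up). */
	struct example_gpio_chip {
		void __iomem *addr;
		u32 rimsc;	/* cached copy of the rising-edge mask register */
	};

	static void example_set_rising(struct example_gpio_chip *chip, u32 bit, bool enable)
	{
		if (enable)
			chip->rimsc |= bit;
		else
			chip->rimsc &= ~bit;
		/* One write, no readl(): the cached value is the source of truth */
		writel(chip->rimsc, chip->addr + 0x40);
	}
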
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd..b2ae09b4cc7 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
obj-y += drm/ vga/ stub/
+obj-y += mali/
diff --git a/drivers/gpu/mali/Kconfig b/drivers/gpu/mali/Kconfig
new file mode 100644
index 00000000000..0781bcbd854
--- /dev/null
+++ b/drivers/gpu/mali/Kconfig
@@ -0,0 +1,15 @@
+config GPU_MALI
+ tristate "ARM Mali 200/300/400 support"
+ depends on ARM
+ ---help---
+ This enables support for the ARM Mali 200/300/400 family of GPUs.
+ Platform-specific configuration is made in configuration files in the
+ drivers/gpu/mali/src folder.
+
+config GPU_MALI_DEBUG
+ boolean "Debug mode (required for instrumentation)"
+ default y
+ depends on GPU_MALI
+ ---help---
+ This enables debug prints in the mali device driver. Debug mode must be
+ enabled in order to use the instrumentation feature of the mali libraries.
diff --git a/drivers/gpu/mali/Makefile b/drivers/gpu/mali/Makefile
new file mode 100644
index 00000000000..93c95aca19a
--- /dev/null
+++ b/drivers/gpu/mali/Makefile
@@ -0,0 +1,10 @@
+MALI_SUBFOLDER := mali400ko/driver/src/devicedrv/mali
+MALIDRM_SUBFOLDER := mali400ko/x11/mali_drm/mali
+MALI_FOLDER := $(srctree)/$(src)/$(MALI_SUBFOLDER)
+ifeq ($(shell [ -d $(MALI_FOLDER) ] && echo "OK"), OK)
+obj-$(CONFIG_GPU_MALI) += $(MALI_SUBFOLDER)/
+obj-y += $(MALIDRM_SUBFOLDER)/
+else
+$(warning WARNING: mali: Could not find $(MALI_FOLDER) - mali device driver will not be built)
+obj-n += ./
+endif
diff --git a/drivers/gpu/mali/mali400ko/.gitignore b/drivers/gpu/mali/mali400ko/.gitignore
new file mode 100644
index 00000000000..e2d66d55882
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/.gitignore
@@ -0,0 +1,32 @@
+#symlinks created during build
+driver/src/devicedrv/mali/arch
+driver/bin
+driver/devdrv
+driver/lib
+
+#build output
+driver/build
+.tmp_versions
+*.cmd
+*.o
+*.d
+driver/src/devicedrv/linux/__mali_build_info.*
+driver/src/devicedrv/linux/modules.order
+driver/src/devicedrv/linux/Module.symvers
+driver/src/devicedrv/mali/Module.symvers
+driver/src/devicedrv/mali/__malidrv_build_info.c
+driver/src/devicedrv/mali/mali.ko
+driver/src/devicedrv/mali/mali.mod.c
+driver/src/devicedrv/mali/modules.order
+driver/out*
+driver/src/shared/essl_compiler/src/shadergen_maligp2/shader_pieces.*
+
+out
+kernel
+
+#temp files from editors and Eclipse project files
+*~
+.cproject
+.project
+.settings
+
diff --git a/drivers/gpu/mali/mali400ko/Makefile b/drivers/gpu/mali/mali400ko/Makefile
new file mode 100644
index 00000000000..9176f9f3795
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/Makefile
@@ -0,0 +1,102 @@
+#This Makefile can be called to do an out-of-kernel build of the mali kernel module.
+#It is not used when mali is built from the kernel build system.
+
+CROSS_COMPILE ?= arm-eabi-
+
+ifeq ($(TARGET_PRODUCT),ste_u5500)
+CONFIG_UX500_SOC_DB5500=y
+export CONFIG_UX500_SOC_DB5500
+endif
+
+PWD:=$(shell pwd)
+
+#if KERNEL_BUILD_DIR parameter is not set, assume there is a symlink to the kernel
+KERNEL_BUILD_DIR?=$(PWD)/kernel
+
+# android
+ifneq (,$(findstring -android,$(PLATFORM)))
+INSTALL_MOD_PATH?=$(ANDROID_OUT_TARGET_PRODUCT_DIRECTORY)/system
+endif
+
+# lbp
+ifneq (,$(findstring -linux,$(PLATFORM)))
+PREFIX?=$(MMROOT)/linux/install/cortexA9-linux-gnu-href_v1
+INSTALL_MOD_PATH?=$(PREFIX)
+endif
+
+#if still no kernel module path is set, install the modules in a local folder
+ifeq (,$(INSTALL_MOD_PATH))
+INSTALL_MOD_PATH?=$(PWD)/out
+endif
+
+KO_FLAGS=KDIR=$(KERNEL_BUILD_DIR) CROSS_COMPILE=$(CROSS_COMPILE)
+
+.PHONY: all entry check
+
+all: mali-devicedrv install-mali
+
+check:
+ @if [ ! -d $(KERNEL_BUILD_DIR) ] ; then echo "Error: provide a path to your linux kernel through KERNEL_BUILD_DIR"; exit 1 ; fi
+
+## Build ###############
+
+mali-devicedrv: check
+ $(MAKE) -C driver/src/devicedrv/mali $(KO_FLAGS) all
+
+ump-devicedrv: check
+ $(MAKE) -C driver/src/devicedrv/ump $(KO_FLAGS) all
+
+mali_drm-devicedrv: check
+ $(MAKE) -C x11/mali_drm/mali $(KO_FLAGS) all
+
+## Clean ###############
+
+clean-mali:
+ $(MAKE) clean -C driver/src/devicedrv/mali $(KO_FLAGS)
+
+clean-ump:
+ $(MAKE) clean -C driver/src/devicedrv/ump $(KO_FLAGS)
+
+clean-mali_drm:
+ $(MAKE) clean -C x11/mali_drm/mali $(KO_FLAGS)
+
+clean: clean-mali
+
+realclean:
+ rm -f driver/src/devicedrv/mali/common/*.o
+ rm -f driver/src/devicedrv/mali/linux/*.o
+ rm -f driver/src/devicedrv/mali/common/.*.cmd
+ rm -f driver/src/devicedrv/mali/linux/.*.cmd
+ rm -f driver/src/devicedrv/mali/.*.cmd
+ rm -f driver/src/devicedrv/mali/*.c
+
+distclean: realclean
+
+## Install #############
+
+install: install-mali
+
+install-mali: mali-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/driver/src/devicedrv/mali modules_install
+
+install-ump: ump-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/driver/src/devicedrv/ump modules_install
+
+install-mali_drm: mali_drm-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/x11/mali_drm/mali modules_install
+
+
+printenv:
+ @echo "\033[0;44m"
+ @echo "ANDROID_OUT_TARGET_PRODUCT_DIRECTORY: " $(ANDROID_OUT_TARGET_PRODUCT_DIRECTORY)
+ @echo "ANDROID_BSP_ROOT: " $(ANDROID_BSP_ROOT)
+ @echo "KERNEL_BUILD_DIR: " $(KERNEL_BUILD_DIR)
+ @echo "MM_INSTALL_DIR: " $(MM_INSTALL_DIR)
+ @echo "PLATFORM: " $(PLATFORM)
+ @echo "PREFIX: " $(PREFIX)
+ @echo "MMROOT: " $(MMROOT)
+ @echo "INSTALL_MOD_PATH: " $(INSTALL_MOD_PATH)
+ @echo "FOO: " $(FOO)
+ @echo "VARIANT: " $(VARIANT)
+ @echo "\033[0m"
+
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h
new file mode 100644
index 00000000000..3bd928aefdb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void * ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system-wide identifier for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+ UMP_DD_SUCCESS, /**< indicates success */
+ UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+ unsigned long addr; /**< The physical address of the block */
+ unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identifier is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. This identifier can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get " function.
+ *
+ * @return UMP_INVALID_MEMORY_HANDLE indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+
+/**
+ * Retrieves the physical memory block information for specified block for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_INTERFACE_H__ */
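The header above documents the kernel-space UMP API in prose; a short usage sketch may help. This is a hedged example, not code from the driver: the function name is made up, error handling is minimal, and the secure ID is assumed to arrive from user space or another driver.

	/* Illustrative use of the UMP kernel-space API declared above. */
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include "ump_kernel_interface.h"

	static int example_walk_ump_buffer(ump_secure_id id)
	{
		ump_dd_physical_block *blocks;
		unsigned long i, count;
		ump_dd_handle mem;

		mem = ump_dd_handle_create_from_secure_id(id);	/* takes a reference */
		if (mem == UMP_DD_HANDLE_INVALID)
			return -EINVAL;

		count = ump_dd_phys_block_count_get(mem);
		blocks = kcalloc(count, sizeof(*blocks), GFP_KERNEL);
		if (!blocks) {
			ump_dd_reference_release(mem);
			return -ENOMEM;
		}

		if (ump_dd_phys_blocks_get(mem, blocks, count) == UMP_DD_SUCCESS)
			for (i = 0; i < count; i++)
				pr_info("block %lu: 0x%lx + %lu bytes\n",
					i, blocks[i].addr, blocks[i].size);

		kfree(blocks);
		ump_dd_reference_release(mem);	/* drop the reference taken above */
		return 0;
	}
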
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h
new file mode 100644
index 00000000000..70eea22d3c3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
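ump_dd_handle_create_from_phys_blocks() wraps memory that is already physically allocated in a UMP handle. A minimal sketch; the wrapper function and its parameters are illustrative, not part of the API above.

	/* Illustrative only: wrap one physically contiguous region in a UMP handle.
	 * The caller supplies the physical address and size; they are placeholders here. */
	static ump_dd_handle example_wrap_region(unsigned long phys, unsigned long size)
	{
		ump_dd_physical_block block = { .addr = phys, .size = size };

		return ump_dd_handle_create_from_phys_blocks(&block, 1);
	}
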
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h
new file mode 100644
index 00000000000..55c49885f06
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile
new file mode 100644
index 00000000000..20b57359167
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile
@@ -0,0 +1,346 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKOS=linux
+FILES_PREFIX=
+
+ifneq ($(KBUILD_EXTMOD),)
+DRIVER_DIR=$(KBUILD_EXTMOD)
+else
+src?=.
+srctree?=.
+DRIVER_DIR?=$(srctree)/$(src)
+M?=$(DRIVER_DIR)
+endif
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+include $(DRIVER_DIR)/Makefile.platform
+include $(DRIVER_DIR)/Makefile.common
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+USING_MMU ?= 1
+USING_UMP ?= 0
+USING_HWMEM ?= 0
+USING_OS_MEMORY ?= 0
+USING_PMM ?= 0
+USING_GPU_UTILIZATION ?= 0
+USING_MALI_RUN_TIME_PM ?= 0
+USING_MALI_PMM_TESTSUITE ?= 0
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 6
+USING_PROFILING ?= 0
+TIMESTAMP ?= default
+BUILD ?= debug
+TARGET_PLATFORM ?= default
+
+ifeq ($(USING_UMP),1)
+ifneq ($(USING_HWMEM),1)
+ UMP_SYMVERS_FILE = ../ump/Module.symvers
+ KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTMOD)/$(UMP_SYMVERS_FILE)
+endif
+endif
+
+# Check if a Mali Core sub module should be enabled, true or false returned
+submodule_enabled = $(shell gcc $(DEFINES) -E $1/arch/config.h | grep type | grep -c $(2))
+
+# linux build system integration
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_OS_MEMORY=$(USING_OS_MEMORY)
+DEFINES += -DUSING_MMU=$(USING_MMU)
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -DUSING_HWMEM=$(USING_HWMEM)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DMALI_TIMELINE_PROFILING_ENABLED=$(USING_PROFILING)
+DEFINES += -DMALI_POWER_MGMT_TEST_SUITE=$(USING_MALI_PMM_TESTSUITE)
+DEFINES += -DMALI_PMM_RUNTIME_JOB_CONTROL_ON=$(USING_MALI_RUN_TIME_PM)
+DEFINES += -DMALI_STATE_TRACKING=1
+DEFINES += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+
+ifneq ($(call submodule_enabled, $M, PMU),0)
+ MALI_PLATFORM_FILE = platform/mali400-pmu/mali_platform.c
+else
+ MALI_PLATFORM_FILE = platform/$(TARGET_PLATFORM)/mali_platform.c
+endif
+
+DEFINES += -DUSING_MALI_PMM=$(USING_PMM)
+DEFINES += -DMALI_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+
+ifeq ($(BUILD), debug)
+DEFINES += -DDEBUG
+endif
+DEFINES += -DSVN_REV=$(SVN_REV)
+DEFINES += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+# Linux has its own mmap cleanup handlers (see mali_kernel_mem_mmu.c)
+DEFINES += -DMALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+
+ifeq ($(USING_UMP),1)
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=1
+ EXTRA_CFLAGS += -I$(DRIVER_DIR)/../../../include/ump
+else
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=0
+endif
+
+# Use our defines when compiling
+EXTRA_CFLAGS += $(DEFINES) -I$(DRIVER_DIR) -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/linux/license/proprietary
+else
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/common/pmm
+
+# Source files which always are included in a build
+SRC = \
+ common/mali_kernel_core.c \
+ linux/mali_kernel_linux.c \
+ $(OSKOS)/mali_osk_indir_mmap.c \
+ common/mali_kernel_rendercore.c \
+ common/mali_kernel_descriptor_mapping.c \
+ common/mali_kernel_vsync.c \
+ linux/mali_ukk_vsync.c \
+ $(MALI_PLATFORM_FILE) \
+ $(OSKFILES) \
+ $(UKKFILES)
+ #__malidrv_build_info.c
+
+ifeq ($(USING_PROFILING),1)
+SRC += \
+ common/mali_kernel_profiling.c \
+ timestamp-$(TIMESTAMP)/mali_timestamp.c
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+endif
+
+# Selecting files to compile by parsing the config file
+
+ifeq ($(USING_PMM),1)
+SRC += \
+ common/pmm/mali_pmm.c \
+ common/pmm/mali_pmm_policy.c \
+ common/pmm/mali_pmm_policy_alwayson.c \
+ common/pmm/mali_pmm_policy_jobcontrol.c \
+ common/pmm/mali_pmm_state.c \
+ linux/mali_kernel_pm.c \
+ linux/mali_osk_pm.c \
+ linux/mali_device_pause_resume.c
+endif
+
+ifeq ($(USING_GPU_UTILIZATION),1)
+SRC += \
+ common/mali_kernel_utilization.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400PP),0)
+ # Mali-400 PP in use
+ EXTRA_CFLAGS += -DUSING_MALI400
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400GP),0)
+ # Mali-400 GP in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300PP),0)
+ # Mali-400 PP in use
+ EXTRA_CFLAGS += -DUSING_MALI400
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300GP),0)
+ # Mali-400 GP in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI200),0)
+ # Mali200 in use
+ EXTRA_CFLAGS += -DUSING_MALI200
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALIGP2),0)
+ # MaliGP2 in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MMU),0)
+ # Mali MMU in use
+ SRC += common/mali_kernel_mem_mmu.c common/mali_kernel_memory_engine.c common/mali_block_allocator.c common/mali_kernel_mem_os.c
+else
+ # No Mali MMU in use
+ SRC += common/mali_kernel_mem_buddy.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400L2),0)
+ # Mali Level2 cache in use
+ EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+ SRC += common/mali_kernel_l2_cache.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300L2),0)
+ # Mali Level2 cache in use
+ EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+ SRC += common/mali_kernel_l2_cache.c
+endif
+
+ifeq ($(USING_HWMEM),1)
+ # HWMEM used as backend for UMP api
+ EXTRA_CFLAGS += -I$(DRIVER_DIR)/../ump/common
+ SRC += platform/ux500/ump_kernel_api_hwmem.c
+endif
+
+# Target build file
+MODULE:=mali.ko
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_GPU_MALI) := $(MODULE:.ko=.o)
+# Tell the Linux build system to enable building of our .c files
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
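+# With MODULE set to mali.ko above, the two lines expand to
+# "obj-$(CONFIG_GPU_MALI) := mali.o" and "mali-y := <SRC with .c replaced by .o>",
+# the usual kbuild pattern for linking a composite module object from several sources.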
+
+else
+# Outside the kernel build system
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
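+# e.g. on an x86_64 host this evaluates to KDIR-x86_64 := /lib/modules/<running kernel>/build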
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ # default to Virtex5
+ CONFIG ?= pb-virtex5-m200
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= pb11mp
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning Config $(CONFIG))
+$(warning Host CPU $(CPU))
+$(warning MMU $(USING_MMU))
+$(warning OS_MEMORY $(USING_OS_MEMORY))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that $(DRIVER_DIR)/arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf $(DRIVER_DIR)/arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+
+# Register number of Mali400 cores for routing test jobs to the right Mali-400
+ifeq ($(CONFIG), pb-virtex5-m400-1)
+VERSION_STRINGS += USING_MALI400_PP_CORES=1
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-2)
+VERSION_STRINGS += USING_MALI400_PP_CORES=2
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-3)
+VERSION_STRINGS += USING_MALI400_PP_CORES=3
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-4)
+VERSION_STRINGS += USING_MALI400_PP_CORES=4
+endif
+
+endif
+
+# Filename to use when patching. Original will be saved as this
+PATCH_BACKUP_FILE:=config_makefile_patch_backup.h
+
+ifdef ARM_INTERNAL_BUILD
+$(warning Looking for previous backup)
+endif
+# Look for previous backup
+shell_output:=$(shell [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -vf arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored")
+ifneq ($(shell_output),)
+ifdef ARM_INTERNAL_BUILD
+$(warning $(shell_output))
+endif
+endif
+
+ifdef PATCH
+ifdef ARM_INTERNAL_BUILD
+$(warning Patching using: $(PATCH) )
+endif
+ifneq ($(shell [ -f arch/$(PATCH).diff ] && echo "OK"), OK)
+# The patch file does not exist
+shell_output:=$(shell echo "Possible PATCH arguments are:" ; find arch/ -name "*.diff" |sed -r 's/.*\/(.*)\.diff/\1/')
+ifdef ARM_INTERNAL_BUILD
+$(warning $(shell_output))
+endif
+$(error Could not find file arch-$(CONFIG)/$(PATCH).diff )
+else
+# Patch file found, do patching
+shell_output:=$(shell cp -f arch/config.h arch/$(PATCH_BACKUP_FILE) && patch --no-backup-if-mismatch -st -p0 arch/config.h -i arch/$(PATCH).diff && echo "OK")
+ifneq ($(shell_output), OK)
+$(warning Output from failed patch: $(shell_output))
+$(shell mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h)
+$(error Could not patch file arch-$(CONFIG)/config.h with arch-$(CONFIG)/$(PATCH).diff.)
+endif
+endif
+endif
+
+# Extend common version-string
+VERSION_STRINGS += BUILD=$(shell echo $(BUILD) | tr a-z A-Z)
+VERSION_STRINGS += CPU=$(CPU)
+VERSION_STRINGS += USING_MMU=$(USING_MMU)
+VERSION_STRINGS += USING_UMP=$(USING_UMP)
+VERSION_STRINGS += USING_HWMEM=$(USING_HWMEM)
+VERSION_STRINGS += USING_PMM=$(USING_PMM)
+VERSION_STRINGS += USING_MALI200=$(call submodule_enabled, ., MALI200)
+VERSION_STRINGS += USING_MALI400=$(call submodule_enabled, ., MALI400)
+VERSION_STRINGS += USING_MALI400_L2_CACHE=$(call submodule_enabled, ., MALI400L2)
+VERSION_STRINGS += USING_GP2=$(call submodule_enabled, ., MALIGP2)
+VERSION_STRINGS += KDIR=$(KDIR)
+
+all: make-build-info-file $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) CONFIG_GPU_MALI=m modules
+ @([ "$(PATCH)" != "" ] && [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored") || echo No Unpatching needed
+ @rm -f $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ @([ "$(PATCH)" != "" ] && [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored") || echo No Unpatching needed
+
+make-build-info-file:
+ @echo 'char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(FILES_PREFIX)__malidrv_build_info.c
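+# The generated file holds a single function of the form
+#   char *__malidrv_build_info(void) { return "malidrv: <expanded VERSION_STRINGS>"; }
+# i.e. the build settings collected in VERSION_STRINGS, embedded as a C string.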
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+# end of outside kernel build system block
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common
new file mode 100644
index 00000000000..ebe3b9e3399
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_atomics.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_irq.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_locks.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_low_level_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_math.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_memory.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_misc.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_mali.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_notification.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_time.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_timers.c
+
+UKKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_gp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_pp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_core.c
+
+ifeq ($(USING_PROFILING),1)
+UKKFILES+=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_profiling.c
+endif
+
+ifeq ($(MALI_PLATFORM_FILE),)
+MALI_PLATFORM_FILE=platform/default/mali_platform.c
+endif
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+SVN_REV := $(shell (cd $(DRIVER_DIR); (svnversion | grep -qv exported && svnversion) || git svn info | grep '^Revision: '| sed -e 's/^Revision: //' ) 2>/dev/null )
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
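+# e.g. a checkout at subversion revision 1234 yields SVN_REV = $(MALI_RELEASE_NAME)-r1234,
+# while a tree without svn/git-svn metadata falls back to the bare $(MALI_RELEASE_NAME).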
+
+# Common version-string, will be extended by OS-specific sections
+VERSION_STRINGS =
+VERSION_STRINGS += CONFIG=$(CONFIG)
+VERSION_STRINGS += USING_OS_MEMORY=$(USING_OS_MEMORY)
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)common\/mali_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'URL: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Rev: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Date: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform
new file mode 100644
index 00000000000..1f8068e25de
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform
@@ -0,0 +1,45 @@
+#these are the build options for the ST-Ericsson Ux500 platforms.
+
+ifeq ($(CONFIG_UX500_SOC_DB5500),y)
+TARGET_PLATFORM = default
+USING_GPU_UTILIZATION = 0
+DEFINES += -DSOC_DB5500=1
+endif
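+# On DB5500 the generic "default" platform glue is used, GPU utilization
+# tracking is disabled, and -DSOC_DB5500=1 makes arch-ux500/config.h select the
+# U5500 SGA base address instead of the U8500 one.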
+
+CONFIG = ux500
+CPU = $(CONFIG)
+TARGET_PLATFORM ?= $(CONFIG)
+ARCH ?= arm
+USING_MMU ?= 1
+USING_PMM ?= 1
+USING_UMP ?= 1
+USING_HWMEM ?= 1
+USING_OS_MEMORY ?= 1
+USING_GPU_UTILIZATION ?= 1
+
+ifeq ($(CONFIG_GPU_MALI_DEBUG),y)
+BUILD ?= debug
+else
+BUILD ?= release
+endif
+
+KDIR-$(CPU)=$(srctree)
+
+#these are paths relative to the mali400ko/driver/src/devicedrv/mali folder
+#not to be confused with the drivers/gpu/mali symlink in the kernel tree
+EXTRA_CFLAGS += -I$(realpath $(DRIVER_DIR)/../../../include/ump)
+EXTRA_CFLAGS += -I$(realpath $(DRIVER_DIR)/../ump/common)
+
+#The following is duplicated from the main Makefile to ensure that the 'arch'
+#link is created even during an in-kernel build.
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that $(DRIVER_DIR)/arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf $(DRIVER_DIR)/arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h
new file mode 100644
index 00000000000..5011c97a28c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-300 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI300GP,
+ .description = "Mali-300 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI300PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-300 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-300 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-300 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI300L2,
+ .base = 0xC0001000,
+ .description = "Mali-300 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h
new file mode 100644
index 00000000000..ee53f49bfe0
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = OS_MEMORY,
+ .description = "OS Memory",
+ .alloc_order = 10, /* Lowest preference for this memory */
+ .size = 96 * 1024 * 1024, /* 96 MB */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = 0,
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0x80000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h
new file mode 100644
index 00000000000..5c7c7f3ef13
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h
new file mode 100644
index 00000000000..9f675fd1d23
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h
new file mode 100644
index 00000000000..cc1f05cb5f4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h
new file mode 100644
index 00000000000..a672e7d22ef
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000C000,
+ .irq = -1,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = 102,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = 0xC0006000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h
new file mode 100644
index 00000000000..ae9100c1ae4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000C000,
+ .irq = -1,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000E000,
+ .irq = -1,
+ .description = "Mali-400 PP 3",
+ .mmu_id = 5
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = 102,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = 0xC0006000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MMU,
+ .base = 0xC0007000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 3",
+ .mmu_id = 5
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h
new file mode 100644
index 00000000000..dc324cf0d39
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Architecture configuration for ST-Ericsson Ux500 platforms
+ *
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#ifndef __ARCH_UX500_CONFIG_H__
+#define __ARCH_UX500_CONFIG_H__
+
+/* #include <mach/hardware.h> */
+
+#define U5500_SGA_BASE 0x801D0000
+#define U8500_SGA_BASE 0xA0300000
+
+#if defined(SOC_DB5500) && (1 == SOC_DB5500)
+#define UX500_SGA_BASE U5500_SGA_BASE
+#else
+#define UX500_SGA_BASE U8500_SGA_BASE
+#endif
+
+#define MEGABYTE (1024*1024)
+#define MALI_MEM_BASE (128 * MEGABYTE)
+#define MALI_MEM_SIZE ( 32 * MEGABYTE)
+#define OS_MEM_SIZE (128 * MEGABYTE)
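+
+/* With the values above the dedicated Mali region is the 32 MB of physical
+ * memory at 0x08000000-0x09FFFFFF; up to a further 128 MB can be served from
+ * ordinary kernel pages through the OS_MEMORY resource below. */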
+
+/* Hardware revision u8500 v1: GX570-BU-00000-r0p1
+ * Hardware revision u8500 v2: GX570-BU-00000-r1p0
+ * Hardware revision u5500: GX570-BU-00000-r1p0
+ * configuration registers: 0xA0300000-0xA031FFFF (stw8500v1_usermanual p269)
+ *
+ * Shared Peripheral Interrupt assignments: (stw8500v1_usermanual p265-266)
+ * Nb | Interrupt Source
+ * 116 | Mali400 combined
+ * 115 | Mali400 geometry processor
+ * 114 | Mali400 geometry processor MMU
+ * 113 | Mali400 pixel processor
+ * 112 | Mali400 pixel processor MMU
+ *
+ * irq offset: 32
+ */
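+
+/* The .irq values below follow directly from that table: Linux numbers a
+ * shared peripheral interrupt N as IRQ N+32, since the first 32 GIC interrupt
+ * IDs are reserved for software-generated and private per-CPU interrupts.
+ * For example the geometry processor on SPI 115 is entered as .irq = 115+32. */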
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = UX500_SGA_BASE + 0x0000,
+ .irq = 115+32,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = UX500_SGA_BASE + 0x8000,
+ .irq = 113+32,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = UX500_SGA_BASE + 0x3000,
+ .irq = 114+32,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = UX500_SGA_BASE + 0x4000,
+ .irq = 112+32,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM",
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = MALI_MEM_BASE,
+ .size = MALI_MEM_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+#if USING_OS_MEMORY
+ {
+ .type = OS_MEMORY,
+ .description = "Linux kernel memory",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .size = OS_MEM_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+ },
+#endif
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0x00000000, /* Validate all memory for now */
+ .size = 2047 * MEGABYTE, /* "2GB ought to be enough for anyone" */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = UX500_SGA_BASE + 0x1000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_UX500_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
new file mode 100644
index 00000000000..fc80a20c813
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+ /* The list will be released in reverse order */
+ block_info *last_allocated;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 start_offset;
+ u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+ _mali_osk_lock_t *mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 cpu_usage_adjust;
+ u32 num_blocks;
+} block_allocator;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ block_allocator * info;
+ u32 usable_size;
+ u32 num_blocks;
+
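+ /* Round the region down to a whole number of 256 kB blocks: e.g. a region of
+ * 0x00828000 bytes is trimmed to 0x00800000 usable bytes, i.e. 32 blocks. */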
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(block_allocator));
+ if (NULL != info)
+ {
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, 105);
+ if (NULL != info->mutex)
+ {
+ info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+ if (NULL != info->all_blocks)
+ {
+ u32 i;
+ info->first_free = NULL;
+ info->num_blocks = num_blocks;
+
+ info->base = base_address;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ for ( i = 0; i < num_blocks; i++)
+ {
+ info->all_blocks[i].next = info->first_free;
+ info->first_free = &info->all_blocks[i];
+ }
+
+ allocator->allocate = block_allocator_allocate;
+ allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+ allocator->destroy = block_allocator_destroy;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_lock_term(info->mutex);
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (block_allocator*)allocator->ctx;
+
+ _mali_osk_free(info->all_blocks);
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
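+ /* Pointer subtraction gives the zero-based index of 'block' within the
+ * all_blocks array; block N corresponds to the N-th 256 kB slice of the
+ * managed region starting at info->base. */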
+ return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ block_allocator * info;
+ u32 left;
+ block_info * last_allocated = NULL;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ block_allocator_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (block_allocator*)ctx;
+ left = descriptor->size - *offset;
+ MALI_DEBUG_ASSERT(0 != left);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return result;
+ }
+
+ ret_allocation->start_offset = *offset;
+ ret_allocation->mapping_length = 0;
+
+ while ((left > 0) && (info->first_free))
+ {
+ block_info * block;
+ u32 phys_addr;
+ u32 padding;
+ u32 current_mapping_size;
+
+ block = info->first_free;
+ info->first_free = info->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+
+ phys_addr = get_phys(info, block);
+
+ padding = *offset & (MALI_BLOCK_SIZE-1);
+
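+ /* 'padding' is the offset into the current 256 kB block, so at most
+ * MALI_BLOCK_SIZE - padding bytes can be mapped from this block in this
+ * pass; any remainder is handled by the next block taken from the free list. */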
+ if (MALI_BLOCK_SIZE - padding < left)
+ {
+ current_mapping_size = MALI_BLOCK_SIZE - padding;
+ }
+ else
+ {
+ current_mapping_size = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ /* This relinks every block we've just allocated back into the free-list */
+ block = last_allocated->next;
+ last_allocated->next = info->first_free;
+ info->first_free = last_allocated;
+ last_allocated = block;
+ }
+
+ break;
+ }
+
+ *offset += current_mapping_size;
+ left -= current_mapping_size;
+ ret_allocation->mapping_length += current_mapping_size;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ if (last_allocated)
+ {
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Record all the information about this allocation */
+ ret_allocation->last_allocated = last_allocated;
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+
+ alloc_info->ctx = info;
+ alloc_info->handle = ret_allocation;
+ alloc_info->release = block_allocator_release;
+ }
+ else
+ {
+ /* Free the allocation information - nothing to be passed back */
+ _mali_osk_free( ret_allocation );
+ }
+
+ return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle)
+{
+ block_allocator * info;
+ block_info * block, * next;
+ block_allocator_allocation *allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (block_allocator*)ctx;
+ allocation = (block_allocator_allocation*)handle;
+ block = allocation->last_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* unmap */
+ mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ while (block)
+ {
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ next = block->next;
+
+ /* relink into free-list */
+ block->next = info->first_free;
+ info->first_free = block;
+
+ /* advance the loop */
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ block_allocator * info;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(block);
+ info = (block_allocator*)ctx;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if (NULL != info->first_free)
+ {
+ void * virt;
+ u32 phys;
+ u32 size;
+ block_info * alloc;
+ alloc = info->first_free;
+
+ phys = get_phys(info, alloc); /* Does not modify info or alloc */
+ size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+ virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+ /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+ * because it's unlikely another allocator will be able to map in. */
+
+ if ( NULL != virt )
+ {
+ block->ctx = info; /* same as incoming ctx */
+ block->handle = alloc;
+ block->phys_base = phys;
+ block->size = size;
+ block->release = block_allocator_release_page_table_block;
+ block->mapping = virt;
+
+ info->first_free = alloc->next;
+
+ alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+ result = MALI_MEM_ALLOC_FINISHED;
+ }
+ }
+ else result = MALI_MEM_ALLOC_NONE;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block )
+{
+ block_allocator * info;
+ block_info * block, * next;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (block_allocator*)page_table_block->ctx;
+ block = (block_info*)page_table_block->handle;
+
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* Unmap all the physical memory at once */
+ _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+ /** @note This loop handles the case where more than one block_info was linked.
+ * Probably unnecessary for page table block releasing. */
+ while (block)
+ {
+ next = block->next;
+
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ block->next = info->first_free;
+ info->first_free = block;
+
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h
new file mode 100644
index 00000000000..c3b5761e23a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c
new file mode 100644
index 00000000000..e91abe120cb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c
@@ -0,0 +1,1418 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if defined(USING_MALI400_L2_CACHE)
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#if defined(USING_MALI200)
+#define MALI_GP_SUBSYSTEM_NAME "MaliGP2"
+#define MALI_GP_CORE_TYPE _MALI_GP2
+#elif defined(USING_MALI400)
+#define MALI_GP_SUBSYSTEM_NAME "Mali-400 GP"
+#define MALI_GP_CORE_TYPE _MALI_400_GP
+#else
+#error "No supported mali core defined"
+#endif
+
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOBGP2_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, maligp_job, embedded_core_job)
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_gp_id = -1;
+
+static mali_core_renderunit * last_gp_core_cookie = NULL;
+
+/* Describes the settings of a maligp job */
+typedef struct maligp_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_gp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 status_reg_on_stop;
+ u32 perf_counter0;
+ u32 perf_counter1;
+ u32 vscl_stop_addr;
+ u32 plbcl_stop_addr;
+ u32 heap_current_addr;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+ int is_stalled_waiting_for_more_memory;
+
+ u32 active_mask;
+ /* progress checking */
+ u32 last_vscl;
+ u32 last_plbcl;
+ /* extended progress checking, only enabled when we can use one of the performance counters */
+ u32 have_extended_progress_checking;
+ u32 vertices;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} maligp_job;
+
+/* Functions exposed to the general external system through
+ function pointers. */
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+void maligp_subsystem_dump_state(void);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core );
+static void maligp_raw_reset( mali_core_renderunit *core);
+static void maligp_reset_hard(struct mali_core_renderunit * core);
+static void maligp_reset(mali_core_renderunit *core);
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core );
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core);
+#endif
+
+/* Functions exposed to mali_core system through function pointers
+ in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core);
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core);
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument);
+static void subsystem_maligp_return_job_to_user(mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style );
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core);
+
+/* Variables */
+static register_address_and_value default_mgmt_regs[] =
+{
+ { MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED }
+};
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_gp2=
+{
+ maligp_subsystem_startup, /* startup */
+ maligp_subsystem_terminate, /* shutdown */
+#if USING_MMU
+ maligp_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ maligp_subsystem_core_system_info_fill, /* system_info_fill */
+ maligp_subsystem_session_begin, /* session_begin */
+ maligp_subsystem_session_end, /* session_end */
+#if USING_MMU
+ maligp_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ maligp_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_maligp ;
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ mali_subsystem_gp_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_maligp, 0, sizeof(*subsystem));
+
+ subsystem = &subsystem_maligp;
+ subsystem->start_job = &subsystem_maligp_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_maligp_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_maligp_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_maligp_get_new_job_from_user;
+ subsystem->suspend_response = &subsystem_maligp_suspend_response;
+ subsystem->return_job_to_user = &subsystem_maligp_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_maligp_renderunit_delete;
+ subsystem->reset_core = &subsystem_maligp_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_maligp_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_maligp_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_maligp_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_GP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_GP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALIGP2 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALIGP2, maligp_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400GP, maligp_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_maligp);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_maligp);
+}
+
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(*session) ), _MALI_OSK_ERR_FAULT);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_maligp;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+ /** @note mali_session_data not needed here */
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session *)*slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t for errors.
+ */
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_maligp, info);
+}
+
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ int err;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALIGP2 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400GP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_GP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_GP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_maligp ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALIGP2_REGISTER_ADDRESS_SPACE_SIZE ;
+ core->description = resource->description;
+ core->irq_nr = resource->irq ;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+ /* Set up core's PMM id */
+ core->pmm_id = MALI_PMM_CORE_GP;
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error1;
+
+	/* Check that the register mapping of the core works by reading the core's
+	   version register (faked when running in benchmark mode). */
+ if (mali_benchmark) {
+ core->core_version = MALI_GP_PRODUCT_ID << 16;
+ } else {
+ core->core_version = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION);
+ }
+
+ err = maligp_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ maligp_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_maligp, core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Initial Register settings:\n"));
+ maligp_print_regs(4, core);
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_maligp, message, data);
+}
+#endif
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core)
+{
+ if (debug_level <= mali_debug_level)
+ {
+ MALI_DEBUG_PRINT(1, (" VS 0x%08X 0x%08X, PLBU 0x%08X 0x%08X ALLOC 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+ MALI_DEBUG_PRINT(1, (" IntRaw 0x%08X IntMask 0x%08X, Status 0x%02X Ver: 0x%08X \n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_MASK),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION)));
+
+ MALI_DEBUG_PRINT(1, (" PERF_CNT Enbl:%d %d Src: %02d %02d VAL: 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+
+ MALI_DEBUG_PRINT(1, (" VS_START 0x%08X PLBU_START 0x%08X AXI_ERR 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_CONTR_AXI_BUS_ERROR_STAT)));
+ }
+}
+#endif
+
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ mali_type = core->core_version >> 16;
+
+#if defined(USING_MALI400)
+ if ( MALI400_GP_PRODUCT_ID != mali_type && MALI300_GP_PRODUCT_ID != mali_type )
+#else
+ if ( MALI_GP_PRODUCT_ID != mali_type )
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from maligp version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali GP: core_version_legal: Reads correct mali version: %d\n", core->core_version )) ;
+ MALI_SUCCESS;
+}
+
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+static void maligp_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ maligp_raw_reset(core);
+ maligp_initialize_registers_mgmt(core);
+ }
+}
+
+
+static void maligp_reset_hard( mali_core_renderunit *core )
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
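+	/* Reset-completion probe: write a dummy value to a register that is safe to
+	   overwrite (WRITE_BOUND_LOW), issue the reset command, then keep writing a
+	   check value until a read returns it, which indicates the core has come back
+	   up and accepts register writes again. The register is restored afterwards. */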
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+
+}
+
+static void maligp_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: maligp_raw_reset: %s\n", core->description)) ;
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
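+	/* Two reset strategies: on a Mali200-class GP the bus is stopped first and the
+	   hard reset sequence is used; on a Mali400-class GP a soft-reset command is
+	   issued and completion is signalled through the RESET_COMPLETED interrupt. */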
+#if defined(USING_MALI200)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali GP: Bus was never stopped during core reset\n"));
+
+ if (request_loop_count==i)
+ {
+		/* Could not stop the bus connections from the core, probably because one of the
+		   already pending bus requests has hit a page fault and therefore cannot complete
+		   before the MMU has done its page-fault handling. Fall back to the heavier MMU
+		   bus reset, which unfortunately resets all the cores behind this MMU as well as
+		   the MMU itself. */
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing MMU bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* the bus was stopped OK, complete the reset */
+ /* use the hard reset routine to do the actual reset */
+ maligp_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+		if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & /* reset-completed bit set? */
+		    MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if ( request_loop_count==i )
+ {
+#if USING_MMU
+		/* Could not stop the bus connections from the core, probably because one of the
+		   already pending bus requests has hit a page fault and therefore cannot complete
+		   before the MMU has done its page-fault handling. Fall back to the heavier MMU
+		   bus reset, which unfortunately resets all the cores behind this MMU as well as
+		   the MMU itself. */
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ }
+ else
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ }
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+/* Sets the registers on maligp according to the const default_mgmt_regs array. */
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ int i;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_initialize_registers_mgmt: %s\n", core->description)) ;
+ for(i=0 ; i< (sizeof(default_mgmt_regs)/sizeof(*default_mgmt_regs)) ; ++i)
+ {
+ mali_core_renderunit_register_write(core, default_mgmt_regs[i].address, default_mgmt_regs[i].value);
+ }
+}
+
+
+/* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was started. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ maligp_job *jobgp;
+ u32 startcmd;
+ /* The local extended version of the general structs */
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+
+ startcmd = 0;
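+	/* frame_registers[] is written directly into the register block starting at
+	   VSCL_START_ADDR below, so [0]/[1] hold the VS command-list start/end and
+	   [2]/[3] the PLBU command-list start/end. A unit whose range is empty
+	   (start == end) has nothing to do and is not started. */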
+ if ( jobgp->user_input.frame_registers[0] != jobgp->user_input.frame_registers[1] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if ( jobgp->user_input.frame_registers[2] != jobgp->user_input.frame_registers[3] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ if(0 == startcmd)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)jobgp->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers Start\n"));
+ maligp_print_regs(4, core);
+#endif
+
+
+ mali_core_renderunit_register_write_array(
+ core,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR,
+ &(jobgp->user_input.frame_registers[0]),
+ sizeof(jobgp->user_input.frame_registers)/sizeof(jobgp->user_input.frame_registers[0]));
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != jobgp->user_input.perf_counter_flag )
+ {
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ jobgp->user_input.perf_counter_src0);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ jobgp->user_input.perf_counter_src1);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = jobgp->user_input.perf_counter_l2_src0;
+ }
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = jobgp->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+			/* Now, retrieve the current values, so we can subtract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&jobgp->perf_counter_l2_src0,
+ &jobgp->perf_counter_l2_val0,
+ &jobgp->perf_counter_l2_src1,
+ &jobgp->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ if ( 0 == (jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))
+ {
+		/* Performance counter 1 is not claimed by userspace, so repurpose it to count
+		   processed vertices; the hang checker below compares this against the last
+		   reading to decide whether the core is still making progress. */
+
+ jobgp->have_extended_progress_checking = 1;
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED
+ );
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
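+	/* Flush the CPU-side caches for mapped Mali memory so job data written by the
+	   CPU is visible to the GP before it is started (an assumption based on the
+	   helper's name). */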
+ subsystem_flush_mapped_mem_cache();
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: STARTING GP WITH CMD: 0x%x\n", startcmd));
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(core,
+ MALIGP2_REG_ADDR_MGMT_CMD,
+ startcmd);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), jobgp->pid, jobgp->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* Check whether the given core has an interrupt pending. Returns 1 and masks out
+   the core's IRQs if so, 0 otherwise. */
+
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: IRQ: %04x\n", irq_readout)) ;
+
+ if ( MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK , MALIGP2_REG_VAL_IRQ_MASK_NONE);
+ /* We do need to handle this in a bottom half, return 1 */
+ return 1;
+ }
+ return 0;
+}
+
+/* Check whether the interrupt indicates that the job has finished. If so, update
+the job struct, reset the core registers and return one of the JOB_STATUS_END_*
+codes; if the job is still running, return JOB_STATUS_CONTINUE_RUN and re-enable
+the relevant bits in the core's interrupt mask.
+Called from the bottom-half interrupt handler. */
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core)
+{
+ mali_core_job * job;
+ maligp_job * jobgp;
+ u32 irq_readout;
+ u32 core_status;
+ u32 vscl;
+ u32 plbcl;
+
+ job = core->current_job;
+
+ if (mali_benchmark) {
+ MALI_DEBUG_PRINT(3, ("MaliGP: Job: Benchmark\n") );
+ irq_readout = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ core_status = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ }
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ jobgp = GET_JOBGP2_PTR(job);
+
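+	/* Snapshot the PLBU heap allocation pointer (returned to userspace as
+	   heap_current_addr) and the current VS/PLBU command-list addresses; the latter
+	   are compared against earlier readings by the hang checker below to decide
+	   whether the core is still making progress. */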
+ jobgp->heap_current_addr = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+
+ vscl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR);
+ plbcl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Status: 0x%x\n",
+ (u32)jobgp->user_input.user_job_ptr, irq_readout , core_status )) ;
+
+ jobgp->irq_status |= irq_readout;
+ jobgp->status_reg_on_stop = core_status;
+
+ if ( 0 != jobgp->is_stalled_waiting_for_more_memory )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ /* Readback the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Mali GP: Job aborted - userspace would not provide more heap memory.\n"));
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+ /* finished ? */
+ else if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers On job end:\n"));
+ maligp_print_regs(4, core);
+#endif
+		MALI_DEBUG_PRINT_IF(5, irq_readout & 0x04, ("OOM when done, ignoring (reg.current = 0x%x, reg.end = 0x%x)\n",
+				mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+				mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR)));
+
+
+ if (0 != jobgp->user_input.perf_counter_flag )
+ {
+ /* Readback the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+ }
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+ return JOB_STATUS_END_SUCCESS; /* core idle */
+ }
+ /* sw watchdog timeout handling or time to do hang checking ? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ (
+ (CORE_HANG_CHECK_TIMEOUT == core->state) &&
+ (
+ (jobgp->have_extended_progress_checking ? (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE) == jobgp->vertices) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) ? (vscl == jobgp->last_vscl) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) ? (plbcl == jobgp->last_plbcl) : 1/*TRUE*/)
+ )
+ )
+ )
+ {
+ /* no progress detected, killed by the watchdog */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali GP: SW-Timeout. Regs:\n"));
+		if (core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) MALI_DEBUG_PRINT(1, ("vscl current = 0x%x last = 0x%x\n", vscl, jobgp->last_vscl));
+		if (core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) MALI_DEBUG_PRINT(1, ("plbcl current = 0x%x last = 0x%x\n", plbcl, jobgp->last_plbcl));
+ if (jobgp->have_extended_progress_checking) MALI_DEBUG_PRINT(1, ("vertices processed = %d, last = %d\n", mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE),
+ jobgp->vertices));
+#ifdef DEBUG
+ maligp_print_regs(2, core);
+#endif
+ return JOB_STATUS_END_HANG;
+ }
+	/* If hang-timeout checking was enabled and progress was detected, we fall through to this check. */
+	/* Check for PLBU OOM before the hang check, to avoid the race where the hw watchdog triggers while we are still waiting to handle the OOM interrupt. */
+ else if ( 0 != (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM))
+ {
+ mali_core_session *session;
+ _mali_osk_notification_t *notific;
+ _mali_uk_gp_job_suspended_s * suspended_job;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("OOM, new heap requested by GP\n"));
+ MALI_DEBUG_PRINT(4, ("Status when OOM: current = 0x%x, end = 0x%x\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+		notific = _mali_osk_notification_create(
+				_MALI_NOTIFICATION_GP_STALLED,
+				sizeof( _mali_uk_gp_job_suspended_s ) );
+ if ( NULL == notific)
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification object\n")) ;
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+
+ core->state = CORE_WORKING;
+ jobgp->is_stalled_waiting_for_more_memory = 1;
+ suspended_job = (_mali_uk_gp_job_suspended_s *)notific->result_buffer; /* this is ok - result_buffer was malloc'd */
+
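+		/* The core pointer is handed to userspace as an opaque cookie; the matching
+		   suspend response must quote it back (checked in subsystem_maligp_suspend_response)
+		   before the one-second response timer armed below expires. */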
+ suspended_job->user_job_ptr = jobgp->user_input.user_job_ptr;
+ suspended_job->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY ;
+ suspended_job->cookie = (u32) core;
+ last_gp_core_cookie = core;
+
+ _mali_osk_notification_queue_send( session->notification_queue, notific);
+
+#ifdef DEBUG
+ maligp_print_regs(4, core);
+#endif
+
+ /* stop all active timers */
+ _mali_osk_timer_del( core->timer);
+ _mali_osk_timer_del( core->timer_hang_detection);
+ MALI_DEBUG_PRINT(4, ("Mali GP: PLBU heap empty, sending memory request to userspace\n"));
+		/* store in watchdog_jiffies how much of the WD timeout remained when the OOM was triggered */
+ job->watchdog_jiffies = (long)job->watchdog_jiffies - (long)_mali_osk_time_tickcount();
+ /* reuse core->timer as the userspace response timeout handler */
+ _mali_osk_timer_add( core->timer, _mali_osk_time_mstoticks(1000) ); /* wait max 1 sec for userspace to respond */
+ return JOB_STATUS_CONTINUE_RUN; /* The core is NOT available for new jobs. */
+ }
+	/* hw watchdog reported a new hang, or an ongoing hang check found that progress was made? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & jobgp->active_mask & MALIGP2_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("Mali GP: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ core->state = CORE_WORKING;
+
+ /* save state for the progress checking */
+ jobgp->last_vscl = vscl;
+ jobgp->last_plbcl = plbcl;
+ if (jobgp->have_extended_progress_checking)
+ {
+ jobgp->vertices = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add( core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ jobgp->active_mask &= ~MALIGP2_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+		return JOB_STATUS_CONTINUE_RUN; /* not finished */
+	}
+ /* no errors, but still working */
+ else if ( ( 0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ERROR)) &&
+ ( 0 != (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE ))
+ )
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ return JOB_STATUS_CONTINUE_RUN;
+ }
+ /* Else there must be some error */
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali GP: Core crashed? *IRQ: 0x%x Status: 0x%x\n", irq_readout, core_status ));
+ #ifdef DEBUG
+ MALI_DEBUG_PRINT(1, ("Mali GP: Registers Before reset:\n"));
+ maligp_print_regs(1, core);
+ #endif
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+}
+
+
+/* Called from the ioctl path. Builds a maligp_job from the data given by
+userspace and queues it on the session, reporting the outcome back through the
+status field of the input/output argument. */
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ maligp_job *jobgp;
+ mali_core_job *job;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_gp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_gp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(jobgp = (maligp_job *) _mali_osk_calloc(1, sizeof(maligp_job)), _MALI_OSK_ERR_FAULT);
+
+ /* Copy the job data from the U/K interface */
+ if ( NULL == _mali_osk_memcpy(&jobgp->user_input, user_ptr_job_input, sizeof(_mali_uk_gp_start_job_s) ) )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: subsystem_maligp_get_new_job_from_user 0x%x\n", (void*)jobgp->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job Regs: 0x%08X 0x%08X, 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ jobgp->user_input.frame_registers[0],
+ jobgp->user_input.frame_registers[1],
+ jobgp->user_input.frame_registers[2],
+ jobgp->user_input.frame_registers[3],
+ jobgp->user_input.frame_registers[4],
+ jobgp->user_input.frame_registers[5]) );
+
+
+ job = GET_JOB_EMBEDDED_PTR(jobgp);
+
+ job->session = session;
+ job_priority_set(job, jobgp->user_input.priority);
+ job_watchdog_set(job, jobgp->user_input.watchdog_msecs );
+ jobgp->heap_current_addr = jobgp->user_input.frame_registers[4];
+
+ job->abort_id = jobgp->user_input.abort_id;
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ jobgp->pid = _mali_osk_get_pid();
+ jobgp->tid = _mali_osk_get_tid();
+#endif
+
+ if (NULL != session->job_waiting_to_run)
+ {
+		/* If the new job does NOT have higher priority than the job already waiting, do not start it */
+ if(!job_has_higher_priority(job, session->job_waiting_to_run))
+ {
+ /* The job we try to add does NOT have higher pri than current */
+ /* Cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+ }
+
+ /* We now know that we have a job, and a slot to put it in */
+
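+	/* active_mask starts out as the full set of IRQ sources we care about; bits are
+	   cleared later, e.g. the HANG bit once the sw hang checker has taken over from
+	   the hw watchdog. */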
+ jobgp->active_mask = MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ jobgp->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_FINISHED,
+ sizeof(_mali_uk_gp_job_finished_s) );
+
+ if ( NULL == jobgp->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x INPUT from user.\n", (u32)jobgp->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we have the checking of priority above */
+ err = mali_core_session_add_job(session, job, &previous_replaced_job);
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Internal error\n")) ;
+ /* Cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( jobgp->notification_obj );
+ goto function_exit;
+ }
+
+	/* If non-NULL: this session had a lower-priority job which was removed.
+	   The replaced job is handed back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ maligp_job *previous_replaced_jobgp;
+
+ previous_replaced_jobgp = GET_JOBGP2_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Replacing job: 0x%08x\n", (u32)previous_replaced_jobgp->user_input.user_job_ptr)) ;
+
+		/* Copy into the input data (which doubles as output data) the
+		   pointer to the job that was replaced, so that the userspace
+		   driver can put that job at the front of its job queue */
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_jobgp->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free jobgp,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ if ( _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(jobgp);
+ }
+ MALI_ERROR(err);
+}
+
+
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument)
+{
+ mali_core_renderunit *core;
+ maligp_job *jobgp;
+ mali_core_job *job;
+
+ _mali_uk_gp_suspend_response_s * suspend_response;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_suspend_response\n"));
+
+ suspend_response = (_mali_uk_gp_suspend_response_s *)argument;
+
+ /* We read job data from User */
+ /* On a single mali_gp system we can only have one Stalled GP,
+ and therefore one stalled request with a cookie. This checks
+ that we get the correct cookie */
+ if ( last_gp_core_cookie != (mali_core_renderunit *)suspend_response->cookie )
+ {
+ MALI_DEBUG_PRINT(2, ("Mali GP: Got an illegal cookie from Userspace.\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ core = (mali_core_renderunit *)suspend_response->cookie;
+ last_gp_core_cookie = NULL;
+ job = core->current_job;
+ jobgp = GET_JOBGP2_PTR(job);
+
+ switch( suspend_response->code )
+ {
+ case _MALIGP_JOB_RESUME_WITH_NEW_HEAP :
+ MALI_DEBUG_PRINT(5, ("MALIGP_JOB_RESUME_WITH_NEW_HEAP jiffies: %li\n", _mali_osk_time_tickcount()));
+ MALI_DEBUG_PRINT(4, ("New Heap addr 0x%08x - 0x%08x\n", suspend_response->arguments[0], suspend_response->arguments[1]));
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+ job->watchdog_jiffies += _mali_osk_time_tickcount(); /* convert to absolute time again */
+ _mali_osk_timer_mod( core->timer, job->watchdog_jiffies); /* update the timer */
+
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, suspend_response->arguments[0]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, suspend_response->arguments[1]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0);
+#endif
+
+ MALI_DEBUG_PRINT(4, ("GP resumed with new heap\n"));
+
+ break;
+
+ case _MALIGP_JOB_ABORT:
+ MALI_DEBUG_PRINT(3, ("MALIGP_JOB_ABORT on heap extend request\n"));
+ _mali_osk_irq_schedulework( core->irq );
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Wrong Suspend response from userspace\n"));
+ }
+ MALI_SUCCESS;
+}
+
+/* Called from the ioctl path. Writes back to userspace which job finished,
+together with its status and debug info. The function must also free and clean
+up the input job object. */
+static void subsystem_maligp_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status )
+{
+ maligp_job *jobgp;
+ _mali_uk_gp_job_finished_s * job_out;
+ _mali_uk_gp_start_job_s* job_input;
+ mali_core_session *session;
+
+
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+ job_out = (_mali_uk_gp_job_finished_s *)jobgp->notification_obj->result_buffer; /* OK - this should've been malloc'd */
+ job_input= &(jobgp->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: Job: 0x%08x OUTPUT to user. Runtime: %d ms, irq readout %x\n",
+ (u32)jobgp->user_input.user_job_ptr,
+ job->render_time_msecs,
+ jobgp->irq_status)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_gp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+
+ job_out->irq_status = jobgp->irq_status;
+ job_out->status_reg_on_stop = jobgp->status_reg_on_stop;
+ job_out->vscl_stop_addr = 0;
+ job_out->plbcl_stop_addr = 0;
+ job_out->heap_current_addr = jobgp->heap_current_addr;
+ job_out->perf_counter0 = jobgp->perf_counter0;
+ job_out->perf_counter1 = jobgp->perf_counter1;
+ job_out->perf_counter_src0 = jobgp->user_input.perf_counter_src0 ;
+ job_out->perf_counter_src1 = jobgp->user_input.perf_counter_src1 ;
+ job_out->render_time = job->render_time_msecs;
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = jobgp->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = jobgp->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = jobgp->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = jobgp->perf_counter_l2_val1;
+#endif
+
+ _mali_osk_notification_queue_send( session->notification_queue, jobgp->notification_obj);
+ jobgp->notification_obj = NULL;
+
+ _mali_osk_free(jobgp);
+
+ last_gp_core_cookie = NULL;
+}
+
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP: maligp_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ maligp_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ maligp_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ maligp_reset_hard(core);
+ maligp_initialize_registers_mgmt(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ break;
+ }
+}
+
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG );
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ if ( MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
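+/* U/K (user/kernel) entry points for the GP subsystem: each call resolves the
+   calling session from its per-session slot and forwards the request to the
+   generic mali_core subsystem helpers. */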
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_suspend_response(session, args);
+}
+
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali GP: signal power up core - queue_only: %d\n", queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_maligp, 0, queue_only ) );
+}
+
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali GP: signal power down core - immediate_only: %d\n", immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_maligp, 0, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+void maligp_subsystem_dump_state(void)
+{
+ mali_core_renderunit_dump_state(&subsystem_maligp);
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c
new file mode 100644
index 00000000000..0ac49379bea
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_core.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#ifdef USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#include "mali_osk_list.h"
+
+#if defined(USING_MALI200)
+#define MALI_PP_SUBSYSTEM_NAME "Mali200"
+#define MALI_PP_CORE_TYPE _MALI_200
+#elif defined(USING_MALI400)
+#define MALI_PP_SUBSYSTEM_NAME "Mali-400 PP"
+#define MALI_PP_CORE_TYPE _MALI_400_PP
+#else
+#error "No supported mali core defined"
+#endif
+
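+/* Helpers for converting between a mali200_job and the generic mali_core_job
+   embedded inside it: GET_JOB_EMBEDDED_PTR returns the embedded generic job,
+   GET_JOB200_PTR (a container_of) recovers the enclosing mali200_job. */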
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOB200_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, mali200_job, embedded_core_job)
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_mali200_id = -1;
+
+/* Describing a mali200 job settings */
+typedef struct mali200_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_pp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 perf_counter0;
+ u32 perf_counter1;
+	u32 last_tile_list_addr; /* Necessary to continue a stopped job */
+
+ u32 active_mask;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+ u32 perf_counter_l2_val0_raw;
+ u32 perf_counter_l2_val1_raw;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} mali200_job;
+
+
+/* Functions exposed to the general external system through
+   function pointers. */
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+void mali200_subsystem_dump_state(void);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core );
+static void mali200_reset(mali_core_renderunit *core);
+static void mali200_reset_hard(struct mali_core_renderunit * core);
+static void mali200_raw_reset(mali_core_renderunit * core);
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core );
+
+/* Functions exposed to the mali_core system through function pointers
+   in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core);
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style);
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core);
+static u32 subsystem_mali200_irq_handler_upper_half(struct mali_core_renderunit * core);
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core);
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_mali200=
+{
+ mali200_subsystem_startup, /* startup */
+ mali200_subsystem_terminate, /* shutdown */
+#if USING_MMU
+ mali200_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ mali200_subsystem_core_system_info_fill, /* system_info_fill */
+ mali200_subsystem_session_begin, /* session_begin */
+ mali200_subsystem_session_end, /* session_end */
+#if USING_MMU
+ mali200_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ mali200_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_mali200 ;
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+ mali_subsystem_mali200_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_mali200, 0, sizeof(subsystem_mali200));
+
+ subsystem = &subsystem_mali200;
+ subsystem->start_job = &subsystem_mali200_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_mali200_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_mali200_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_mali200_get_new_job_from_user;
+ subsystem->return_job_to_user = &subsystem_mali200_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_mali200_renderunit_delete;
+ subsystem->reset_core = &subsystem_mali200_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_mali200_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_mali200_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_mali200_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_PP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_PP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALI200 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI200, mali200_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400PP, mali200_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_mali200);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_mali200);
+}
+
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(mali_core_session) ), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_mali200;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session*) *slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t for errors.
+ */
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_mali200, info);
+}
+
+
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALI200 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400PP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_PP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_PP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_mali200 ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALI200_REG_SIZEOF_REGISTER_BANK ;
+ core->irq_nr = resource->irq ;
+ core->description = resource->description;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+	/* Assign the core's PMM id from the number of PP cores registered so far (PP0..PP3) */
+ switch( subsystem_mali200.number_of_cores )
+ {
+ case 0:
+ core->pmm_id = MALI_PMM_CORE_PP0;
+ break;
+ case 1:
+ core->pmm_id = MALI_PMM_CORE_PP1;
+ break;
+ case 2:
+ core->pmm_id = MALI_PMM_CORE_PP2;
+ break;
+ case 3:
+ core->pmm_id = MALI_PMM_CORE_PP3;
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown supported core for PMM\n"));
+ err = _MALI_OSK_ERR_FAULT;
+ goto exit_on_error0;
+ }
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to map register\n"));
+ goto exit_on_error1;
+ }
+
+	/* Check that the register mapping of the core works by reading the core's
+	   version register (faked when running in benchmark mode). */
+ if (mali_benchmark) {
+#if defined(USING_MALI200)
+ core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 5 /* Fake Mali200-r0p5 */;
+#elif defined(USING_MALI400)
+ core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 0x0101 /* Fake Mali400-r1p1 */;
+#else
+#error "No supported mali core defined"
+#endif
+ } else {
+ core->core_version = mali_core_renderunit_register_read(
+ core,
+ MALI200_REG_ADDR_MGMT_VERSION);
+ }
+
+ err = mali200_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid core\n"));
+ goto exit_on_error2;
+ }
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ mali200_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_mali200, core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to register with core\n"));
+ goto exit_on_error2;
+ }
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR(err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_mali200, message, data);
+}
+#endif
+
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ mali_type = core->core_version >> 16;
+#if defined(USING_MALI400)
+	/* Mali300 and Mali400 are compatible; accept either core. */
+ if (MALI400_PP_PRODUCT_ID != mali_type && MALI300_PP_PRODUCT_ID != mali_type)
+#else
+ if (MALI_PP_PRODUCT_ID != mali_type)
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from " MALI_PP_SUBSYSTEM_NAME " version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP: core_version_legal: Reads correct mali version: %d\n", mali_type) ) ;
+ MALI_SUCCESS;
+}
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+static void mali200_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: mali200_raw_reset: %s\n", core->description));
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable IRQs */
+
+#if defined(USING_MALI200)
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali PP: Bus was never stopped during core reset\n"));
+
+
+ if (request_loop_count==i)
+ {
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ /* Could not stop the bus connections from the core, probably because one of the already
+ pending bus requests has had a page fault, and therefore cannot complete before the MMU does
+ page fault handling. This is treated as a heavier reset, which unfortunately resets all
+ the cores on this MMU in addition to the MMU itself */
+ MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* use the hard reset routine to do the actual reset */
+ mali200_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count==i)
+ {
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ /* Could not stop the bus connections from the core, probably because one of the already
+ pending bus requests has had a page fault, and therefore cannot complete before the MMU does
+ page fault handling. This is treated as a heavier reset, which unfortunately resets all
+ the cores on this MMU in addition to the MMU itself */
+ MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+ else
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+static void mali200_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ mali200_raw_reset(core);
+ mali200_initialize_registers_mgmt(core);
+ }
+}
+
+/* Sets up the management registers on mali200. Currently only the interrupt mask register is written. */
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_initialize_registers_mgmt: %s\n", core->description)) ;
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was started. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ mali200_job *job200;
+
+ /* The local extended version of the general structs */
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ if ( (0 == job200->user_input.frame_registers[0]) ||
+ (0 == job200->user_input.frame_registers[1]) )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)job200->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x START_RENDER Tile_list: 0x%08x\n",
+ (u32)job200->user_input.user_job_ptr,
+ job200->user_input.frame_registers[0]));
+ MALI_DEBUG_PRINT(6, ("Mali PP: RSW base addr: 0x%08x Vertex base addr: 0x%08x\n",
+ job200->user_input.frame_registers[1], job200->user_input.frame_registers[2]));
+
+ /* Frame registers. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_FRAME,
+ &(job200->user_input.frame_registers[0]),
+ MALI200_NUM_REGS_FRAME);
+
+ /* Write Back unit 0. Copy from mem to physical registers*/
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB0,
+ &(job200->user_input.wb0_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+ /* Write Back unit 1. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB1,
+ &(job200->user_input.wb1_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+ /* Write Back unit 2. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB2,
+ &(job200->user_input.wb2_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != job200->user_input.perf_counter_flag )
+ {
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALI200_REG_VAL_PERF_CNT_ENABLE);
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ job200->user_input.perf_counter_src0);
+
+ }
+
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALI200_REG_VAL_PERF_CNT_ENABLE);
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ job200->user_input.perf_counter_src1);
+
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = job200->user_input.perf_counter_l2_src0;
+ }
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = job200->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+ /* Now, retrieve the current values, so we can subtract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&job200->perf_counter_l2_src0,
+ &job200->perf_counter_l2_val0,
+ &job200->perf_counter_l2_src1,
+ &job200->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ subsystem_flush_mapped_mem_cache();
+ _mali_osk_mem_barrier();
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), job200->pid, job200->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+static u32 subsystem_mali200_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_irq_handler_upper_half: %s\n", core->description)) ;
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+ return 1;
+ }
+ return 0;
+}
+
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+ u32 current_tile_addr;
+ u32 core_status;
+ mali_core_job * job;
+ mali200_job * job200;
+
+ job = core->current_job;
+ job200 = GET_JOB200_PTR(job);
+
+
+ if (mali_benchmark) {
+ irq_readout = MALI200_REG_VAL_IRQ_END_OF_FRAME;
+ current_tile_addr = 0;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+ current_tile_addr = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR);
+ core_status = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS);
+ }
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ job200->irq_status |= irq_readout;
+
+ MALI_DEBUG_PRINT_IF( 3, ( 0 != irq_readout ),
+ ("Mali PP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status));
+
+ if ( MALI200_REG_VAL_IRQ_END_OF_FRAME & irq_readout)
+ {
+#if defined(USING_MALI200)
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES);
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ if (0 != job200->user_input.perf_counter_flag )
+ {
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ job200->perf_counter0 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ job200->perf_counter1 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ if (job200->perf_counter_l2_src0 == src0)
+ {
+ job200->perf_counter_l2_val0_raw = val0;
+ job200->perf_counter_l2_val0 = val0 - job200->perf_counter_l2_val0;
+ }
+ else
+ {
+ job200->perf_counter_l2_val0_raw = 0;
+ job200->perf_counter_l2_val0 = 0;
+ }
+
+ if (job200->perf_counter_l2_src1 == src1)
+ {
+ job200->perf_counter_l2_val1_raw = val1;
+ job200->perf_counter_l2_val1 = val1 - job200->perf_counter_l2_val1;
+ }
+ else
+ {
+ job200->perf_counter_l2_val1_raw = 0;
+ job200->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+ }
+
+ return JOB_STATUS_END_SUCCESS; /* reschedule */
+ }
+ /* Overall SW watchdog timeout, or hang check timeout with no progress detected? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ ((CORE_HANG_CHECK_TIMEOUT == core->state) && (current_tile_addr == job200->last_tile_list_addr))
+ )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+ /* no progress detected, killed by the watchdog */
+ MALI_DEBUG_PRINT(2, ("M200: SW-Timeout Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x.\n", irq_readout ,current_tile_addr ,core_status) );
+ /* In this case the surrounding system will clean up and reset the core */
+ return JOB_STATUS_END_HANG;
+ }
+ /* HW watchdog triggered or an existing hang check passed? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & job200->active_mask & MALI200_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("M200: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ job200->last_tile_list_addr = current_tile_addr;
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add(core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ job200->active_mask &= ~MALI200_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout & ~MALI200_REG_VAL_IRQ_HANG);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* not finished */
+ }
+ /* No irq pending, core still busy */
+ else if ((0 == (irq_readout & MALI200_REG_VAL_IRQ_MASK_USED)) && ( 0 != (core_status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)))
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* Not finished */
+ }
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali PP: Job: 0x%08x CRASH? Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status) ) ;
+
+ if (irq_readout & MALI200_REG_VAL_IRQ_BUS_ERROR)
+ {
+ u32 bus_error = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS);
+
+ MALI_DEBUG_PRINT(1, ("Bus error status: 0x%08X\n", bus_error));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x01), ("Bus write error from id 0x%02x\n", (bus_error>>2) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x02), ("Bus read error from id 0x%02x\n", (bus_error>>6) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (0 == (bus_error & 0x03)), ("Bus error but neither read nor write was set as the error reason\n"));
+ (void)bus_error;
+ }
+
+ return JOB_STATUS_END_UNKNOWN_ERR; /* reschedule */
+ }
+}
+
+
+/* This function is called from the ioctl function and should return a pointer to a newly
+created mali_core_job object populated with the data given by userspace */
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ mali200_job *job200;
+ mali_core_job *job;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_pp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_pp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(job200 = (mali200_job *) _mali_osk_malloc(sizeof(mali200_job)), _MALI_OSK_ERR_NOMEM);
+ _mali_osk_memset(job200, 0 , sizeof(mali200_job) );
+
+ /* We read job data from Userspace pointer */
+ if ( NULL == _mali_osk_memcpy((void*)&job200->user_input, user_ptr_job_input, sizeof(job200->user_input)) )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_get_new_job_from_user 0x%x\n", (void*)job200->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: Frameregs: 0x%x 0x%x 0x%x Writeback[1] 0x%x, Pri:%d; Watchd:%d\n",
+ job200->user_input.frame_registers[0], job200->user_input.frame_registers[1], job200->user_input.frame_registers[2],
+ job200->user_input.wb0_registers[1], job200->user_input.priority,
+ job200->user_input.watchdog_msecs));
+
+ if ( job200->user_input.perf_counter_flag)
+ {
+#if defined(USING_MALI400_L2_CACHE)
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x l2_src0:0x%x l2_src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1,
+ job200->user_input.perf_counter_l2_src0,
+ job200->user_input.perf_counter_l2_src1));
+#else
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1));
+#endif
+ }
+
+ job = GET_JOB_EMBEDDED_PTR(job200);
+
+ job->session = session;
+ job_priority_set(job, job200->user_input.priority);
+ job_watchdog_set(job, job200->user_input.watchdog_msecs );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ job200->pid = _mali_osk_get_pid();
+ job200->tid = _mali_osk_get_tid();
+#endif
+
+ job->abort_id = job200->user_input.abort_id;
+ if (NULL != session->job_waiting_to_run)
+ {
+ /* IF NOT( newjob HAS HIGHER PRIORITY THAN waitingjob) EXIT_NOT_START newjob */
+ if(!job_has_higher_priority(job, session->job_waiting_to_run))
+ {
+ /* The job we try to add does NOT have higher pri than current */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+ }
+
+ /* We now know that we have a job and an empty session slot to put it in */
+
+ job200->active_mask = MALI200_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ job200->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_PP_FINISHED,
+ sizeof(_mali_uk_pp_job_finished_s) );
+
+ if ( NULL == job200->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x INPUT from user.\n", (u32)job200->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we checked the priority above */
+ if ( _MALI_OSK_ERR_OK != mali_core_session_add_job(session, job, &previous_replaced_job))
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Internal error\n")) ;
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( job200->notification_obj );
+ goto function_exit;
+ }
+
+ /* If MALI_TRUE: This session had a job with lower priority which was removed.
+ This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ mali200_job *previous_replaced_job200;
+
+ previous_replaced_job200 = GET_JOB200_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Replacing job: 0x%08x\n", (u32)previous_replaced_job200->user_input.user_job_ptr)) ;
+
+ /* Copy into the input data (which is also output data) the
+ pointer to the job that was replaced, so that the userspace
+ driver can put this job at the front of its job queue */
+
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_job200->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free job200,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ if (_MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(job200);
+ }
+ MALI_ERROR(err);
+}
+
+/* This function is called from the ioctl function and should write the necessary data
+back to userspace, telling which job finished and the status and debug info for that job.
+The function must also free and clean up the input job object. */
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status)
+{
+ mali200_job *job200;
+ _mali_uk_pp_job_finished_s * job_out;
+ _mali_uk_pp_start_job_s * job_input;
+ mali_core_session *session;
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_PRINT(1, ("subsystem_mali200_return_job_to_user received a NULL ptr\n"));
+ return;
+ }
+
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ if (NULL == job200->notification_obj)
+ {
+ MALI_DEBUG_PRINT(1, ("Found job200 with NULL notification object, abandoning userspace sending\n"));
+ return;
+ }
+
+ job_out = job200->notification_obj->result_buffer;
+ job_input= &(job200->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x OUTPUT to user. Runtime: %dms\n",
+ (u32)job200->user_input.user_job_ptr,
+ job->render_time_msecs)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_pp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+ job_out->irq_status = job200->irq_status;
+ job_out->perf_counter0 = job200->perf_counter0;
+ job_out->perf_counter1 = job200->perf_counter1;
+ job_out->render_time = job->render_time_msecs;
+
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = job200->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = job200->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = job200->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = job200->perf_counter_l2_val1;
+ job_out->perf_counter_l2_val0_raw = job200->perf_counter_l2_val0_raw;
+ job_out->perf_counter_l2_val1_raw = job200->perf_counter_l2_val1_raw;
+#endif
+
+ _mali_osk_notification_queue_send( session->notification_queue, job200->notification_obj);
+ job200->notification_obj = NULL;
+
+ _mali_osk_free(job200);
+}
+
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: mali200_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+static void mali200_reset_hard(struct mali_core_renderunit * core)
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_renderunit_reset_core_hard called for core %s\n", core->description));
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+}
+
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ mali200_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ mali200_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ mali200_reset_hard(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ }
+}
+
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power up core: %d - queue_only: %d\n", core_num, queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_mali200, core_num, queue_only ) );
+}
+
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power down core: %d - immediate_only: %d\n", core_num, immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_mali200, core_num, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+void mali200_subsystem_dump_state(void)
+{
+ mali_core_renderunit_dump_state(&subsystem_mali200);
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h
new file mode 100644
index 00000000000..08516c56a9f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* The file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: it will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra set of parentheses around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg
+ *
+ * The suggested range for the DEBUG-LEVEL is [1:6], where
+ * [1:2] are messages with the highest priority, indicating possible errors.
+ * [3:4] are messages with medium priority, outputting important variables.
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
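+/* Illustrative usage only (not exhaustive); 'core' stands for a hypothetical
+ * mali_core_renderunit pointer:
+ *
+ *   MALI_DEBUG_ASSERT_POINTER(core);
+ *   MALI_DEBUG_PRINT(3, ("Mali PP: initializing core %s\n", core->description));
+ *   MALI_PRINT_ERROR(("Mali PP: failed to map registers\n"));
+ */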
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
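+/* Typical usage (illustrative), propagating a failure from resource initialization:
+ *
+ *   MALI_CHECK_NO_ERROR(_mali_osk_resources_init(&arch_configuration, &num_resources));
+ */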
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most often used to execute some clean-up code before
+ * exiting with a call to MALI_ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+extern int mali_debug_level;
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c
new file mode 100644
index 00000000000..a40808bd2d8
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c
@@ -0,0 +1,892 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_gp.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_rendercore.h"
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif /* USING_MALI_PMM */
+
+/* platform specific set up */
+#include "mali_platform.h"
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_core_id = -1;
+
+/** Pointer to table of resource definitions available to the Mali driver.
+ * _mali_osk_resources_init() sets up the pointer to this table.
+ */
+static _mali_osk_resource_t *arch_configuration = NULL;
+
+/** Number of resources initialized by _mali_osk_resources_init() */
+static u32 num_resources;
+
+static _mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources );
+
+static _mali_osk_errcode_t initialize_subsystems(void);
+static void terminate_subsystems(void);
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id);
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+static _mali_osk_errcode_t build_system_info(void);
+
+/**
+ * @brief handler for MEM_VALIDATION resources
+ *
+ * This resource handler is common to all memory systems. It provides a default
+ * means for validating requests to map in external memory via
+ * _mali_ukk_map_external_mem. In addition, if _mali_ukk_va_to_pa is
+ * implemented, then _mali_ukk_va_to_pa can make use of this MEM_VALIDATION
+ * resource.
+ *
+ * MEM_VALIDATION also provides a CPU physical to Mali physical address
+ * translation, for use by _mali_ukk_map_external_mem.
+ *
+ * @note MEM_VALIDATION resources are only to handle simple cases where a
+ * certain physical address range is allowed to be mapped in by any process,
+ * e.g. a framebuffer at a fixed location. If the implementor has more complex
+ * mapping requirements, then they must either:
+ * - implement their own memory validation function
+ * - or, integrate with UMP.
+ *
+ * @param resource The resource to handle (type MEM_VALIDATION)
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource);
+
+/* MEM_VALIDATION handler state */
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+ s32 cpu_usage_adjust; /**< Offset to add to Mali Physical address to obtain CPU physical address */
+} _mali_mem_validation_t;
+
+#define INVALID_MEM 0xffffffff
+
+static _mali_mem_validation_t mem_validator = { INVALID_MEM, INVALID_MEM, -1 };
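+/* Illustrative example (values are hypothetical): a platform typically exposes a fixed
+ * physical region, such as a framebuffer, as a MEM_VALIDATION entry in its
+ * _mali_osk_resource_t configuration table. base, size and cpu_usage_adjust must be
+ * page aligned, as checked by mali_kernel_core_resource_mem_validation():
+ *
+ *   { .type = MEM_VALIDATION, .description = "Framebuffer",
+ *     .base = 0x5A000000, .size = 0x00800000, .cpu_usage_adjust = 0 },
+ */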
+
+static struct mali_kernel_subsystem mali_subsystem_core =
+{
+ mali_kernel_subsystem_core_setup, /* startup */
+ mali_kernel_subsystem_core_cleanup, /* shutdown */
+ NULL, /* load_complete */
+ mali_kernel_subsystem_core_system_info_fill, /* system_info_fill */
+ mali_kernel_subsystem_core_session_begin, /* session_begin */
+ NULL, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+static struct mali_kernel_subsystem * subsystems[] =
+{
+ /* always initialize the hw subsystems first */
+ /* always included */
+ &mali_subsystem_memory,
+
+#if USING_MALI_PMM
+ /* The PMM must be initialized before any cores - including L2 cache */
+ &mali_subsystem_pmm,
+#endif
+
+ /* The rendercore subsystem must be initialized before any subsystem based on the
+ * rendercores is started e.g. mali_subsystem_mali200 and mali_subsystem_gp2 */
+ &mali_subsystem_rendercore,
+
+ /* add reference to the subsystem */
+ &mali_subsystem_mali200,
+
+ /* add reference to the subsystem */
+ &mali_subsystem_gp2,
+
+#if defined USING_MALI400_L2_CACHE
+ &mali_subsystem_l2_cache,
+#endif
+
+ /* always included */
+ /* NOTE Keep the core entry at the tail of the list */
+ &mali_subsystem_core
+};
+
+#define SUBSYSTEMS_COUNT ( sizeof(subsystems) / sizeof(subsystems[0]) )
+
+/* Pointers to this type available as incomplete struct in mali_kernel_session_manager.h */
+struct mali_session_data
+{
+ void * subsystem_data[SUBSYSTEMS_COUNT];
+ _mali_osk_notification_queue_t * ioctl_queue;
+};
+
+static mali_kernel_resource_registrator resource_handler[RESOURCE_TYPE_COUNT] = { NULL, };
+
+/* system info variables */
+static _mali_osk_lock_t *system_info_lock = NULL;
+static _mali_system_info * system_info = NULL;
+static u32 system_info_size = 0;
+
+/* is called from OS specific driver entry point */
+_mali_osk_errcode_t mali_kernel_constructor( void )
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_platform_init(NULL);
+ if (_MALI_OSK_ERR_OK != err) goto error1;
+
+ err = _mali_osk_init();
+ if (_MALI_OSK_ERR_OK != err) goto error2;
+
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Svn revision: %s\n", SVN_REV_STRING));
+
+ err = initialize_subsystems();
+ if (_MALI_OSK_ERR_OK != err) goto error3;
+
+ MALI_PRINT(("Mali device driver %s loaded\n", SVN_REV_STRING));
+
+ MALI_SUCCESS;
+
+error3:
+ MALI_PRINT(("Mali subsystems failed\n"));
+ _mali_osk_term();
+error2:
+ MALI_PRINT(("Mali device driver init failed\n"));
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit(NULL))
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+error1:
+ MALI_PRINT(("Failed to init platform\n"));
+ MALI_ERROR(err);
+}
+
+/* is called from OS specific driver exit point */
+void mali_kernel_destructor( void )
+{
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+ terminate_subsystems(); /* subsystems are responsible for their registered resources */
+ _mali_osk_term();
+
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit(NULL))
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+ MALI_DEBUG_PRINT(2, ("Module unloaded.\n"));
+}
+
+_mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources )
+{
+ _mali_osk_resource_t *arch_resource = *arch_configuration;
+ u32 i;
+#if USING_MALI_PMM
+ u32 is_pmu_first_resource = 1;
+#endif /* USING_MALI_PMM */
+
+ /* loop over arch configuration */
+ for (i = 0; i < num_resources; ++i, arch_resource++)
+ {
+ if ( (arch_resource->type >= RESOURCE_TYPE_FIRST) &&
+ (arch_resource->type < RESOURCE_TYPE_COUNT) &&
+ (NULL != resource_handler[arch_resource->type])
+ )
+ {
+#if USING_MALI_PMM
+ if((arch_resource->type != PMU) && (is_pmu_first_resource == 1))
+ {
+ _mali_osk_resource_t mali_pmu_virtual_resource;
+ mali_pmu_virtual_resource.type = PMU;
+ mali_pmu_virtual_resource.description = "Virtual PMU";
+ mali_pmu_virtual_resource.base = 0x00000000;
+ mali_pmu_virtual_resource.cpu_usage_adjust = 0;
+ mali_pmu_virtual_resource.size = 0;
+ mali_pmu_virtual_resource.irq = 0;
+ mali_pmu_virtual_resource.flags = 0;
+ mali_pmu_virtual_resource.mmu_id = 0;
+ mali_pmu_virtual_resource.alloc_order = 0;
+ MALI_CHECK_NO_ERROR(resource_handler[mali_pmu_virtual_resource.type](&mali_pmu_virtual_resource));
+ }
+ is_pmu_first_resource = 0;
+#endif /* USING_MALI_PMM */
+
+ MALI_CHECK_NO_ERROR(resource_handler[arch_resource->type](arch_resource));
+ /* the subsystem shutdown process will release all the resources already registered */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("No handler installed for resource %s, type %d\n", arch_resource->description, arch_resource->type));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t initialize_subsystems(void)
+{
+ int i, j;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; /* default error code */
+
+ MALI_CHECK_NON_NULL(system_info_lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0 ), _MALI_OSK_ERR_FAULT);
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->startup)
+ {
+ /* the subsystem has a startup function defined */
+ err = subsystems[i]->startup(i); /* the subsystem identifier is the offset in our subsystems array */
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ for (j = 0; j < (int)SUBSYSTEMS_COUNT; ++j)
+ {
+ if (NULL != subsystems[j]->load_complete)
+ {
+ /* the subsystem has a load_complete function defined */
+ err = subsystems[j]->load_complete(j);
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ /* All systems loaded and resources registered */
+ /* Build system info */
+ if (_MALI_OSK_ERR_OK != build_system_info()) goto cleanup;
+
+ MALI_SUCCESS; /* all ok */
+
+cleanup:
+ /* i is the index of the subsystem which failed to start; all indices before that have to be shut down */
+ for (i = i - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ /* Call possible shutdown notification functions */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+
+ _mali_osk_lock_term( system_info_lock );
+ MALI_ERROR(err); /* err is what the module which failed its startup returned, or the default */
+}
+
+static void terminate_subsystems(void)
+{
+ int i;
+ /* shut down subsystems in reverse order from startup */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+ if (system_info_lock) _mali_osk_lock_term( system_info_lock );
+}
+
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data)
+{
+ int i;
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->broadcast_notification)
+ {
+ subsystems[i]->broadcast_notification(message, data);
+ }
+ }
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id)
+{
+ mali_subsystem_core_id = id;
+
+ /* Register our own resources */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEM_VALIDATION, mali_kernel_core_resource_mem_validation));
+
+ /* parse the arch resource definition and tell all the subsystems */
+ /* this is why the core subsystem has to be specified last in the subsystem array */
+ MALI_CHECK_NO_ERROR(_mali_osk_resources_init(&arch_configuration, &num_resources));
+
+ MALI_CHECK_NO_ERROR(register_resources(&arch_configuration, num_resources));
+
+ /* resource parsing succeeded and the subsystems have correctly accepted their resources */
+ MALI_SUCCESS;
+}
+
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+}
+
+
+static _mali_osk_errcode_t build_system_info(void)
+{
+ unsigned int i;
+ int err = _MALI_OSK_ERR_FAULT;
+ _mali_system_info * new_info, * cleanup;
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ u32 new_size = 0;
+
+ /* create a new system info struct */
+ MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(new_info, 0, sizeof(_mali_system_info));
+
+ /* if an error happens during any of the system_info_fill calls, clean up the new info structs */
+ cleanup = new_info;
+
+ /* ask each subsystem to fill in its info */
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->system_info_fill)
+ {
+ err = subsystems[i]->system_info_fill(new_info);
+ if (_MALI_OSK_ERR_OK != err) goto error_exit;
+ }
+ }
+
+ /* building succeeded, calculate the size */
+
+ /* size needed of the system info struct itself */
+ new_size = sizeof(_mali_system_info);
+
+ /* size needed for the cores */
+ for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+ new_size += sizeof(_mali_core_info);
+ }
+
+ /* size needed for the memory banks */
+ for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ new_size += sizeof(_mali_mem_info);
+ }
+
+ /* lock system info access so a user won't get a corrupted version */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* cleanup the old one */
+ cleanup = system_info;
+ /* set new info */
+ system_info = new_info;
+ system_info_size = new_size;
+
+ /* we're safe */
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* ok result */
+ err = _MALI_OSK_ERR_OK;
+
+ /* we share the cleanup routine with the error case */
+error_exit:
+ if (NULL == cleanup) MALI_ERROR((_mali_osk_errcode_t)err); /* no cleanup needed, return what err contains */
+
+ /* cleanup */
+
+ /* delete all the core info structs */
+ while (NULL != cleanup->core_info)
+ {
+ current_core = cleanup->core_info;
+ cleanup->core_info = cleanup->core_info->next;
+ _mali_osk_free(current_core);
+ }
+
+ /* delete all the mem info structs */
+ while (NULL != cleanup->mem_info)
+ {
+ current_mem = cleanup->mem_info;
+ cleanup->mem_info = cleanup->mem_info->next;
+ _mali_osk_free(current_mem);
+ }
+
+ /* delete the system info struct itself */
+ _mali_osk_free(cleanup);
+
+ /* return whatever err is, we could end up here in both the error and success cases */
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check compatibility */
+ if ( args->version == _MALI_UK_API_VERSION )
+ {
+ args->compatible = 1;
+ }
+ else
+ {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_system_info_size(_mali_uk_get_system_info_size_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ args->size = system_info_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
+{
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ void * current_write_pos, ** current_patch_pos;
+ u32 adjust_ptr_base;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* lock the system info */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* first check size */
+ if (args->size < system_info_size) goto exit_when_locked;
+
+ /* we build a copy of system_info in the user space buffer specified by the user and
+ * patch up the pointers. The ukk_private members of _mali_uk_get_system_info_s may
+ * indicate a different base address for patching the pointers (normally the
+ * address of the provided system_info buffer would be used). This is helpful when
+ * the system_info buffer needs to get copied to user space and the pointers need
+ * to be in user space.
+ */
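+ /* Worked example (hypothetical addresses): if ukk_private holds the user-space
+ * address 0x10000000 and a struct is written 0x20 bytes into args->system_info,
+ * the patched pointer below becomes
+ * adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info)
+ * = 0x10000000 + 0x20 = 0x10000020
+ */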
+ if (0 == args->ukk_private)
+ {
+ adjust_ptr_base = (u32)args->system_info;
+ }
+ else
+ {
+ adjust_ptr_base = args->ukk_private;
+ }
+
+ /* copy each struct into the buffer, and update its pointers */
+ current_write_pos = (void *)args->system_info;
+
+ /* first, the master struct */
+ _mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));
+
+ /* advance write pointer */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));
+
+ /* first we write the core info structs, patch starts at master's core_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));
+
+ for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ /* then we write the mem info structs, patch starts at master's mem_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));
+
+ for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ /* patch the pointer pointing to this mem bank */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the mem info */
+ _mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ err = _MALI_OSK_ERR_OK;
+exit_when_locked:
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+ MALI_ERROR(err);
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete( notification );
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ MALI_SUCCESS;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if ( NULL == notification)
+ {
+ MALI_PRINT_ERROR( ("Failed to create notification object\n")) ;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ info->drivermode = _MALI_DRIVER_MODE_NORMAL;
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ MALI_CHECK_NON_NULL(slot, _MALI_OSK_ERR_INVALID_ARGS);
+ *slot = queue;
+ MALI_SUCCESS;
+}
+
+/* MEM_VALIDATION resource handler */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ MALI_CHECK( ((u32)-1) == mem_validator.phys_base, _MALI_OSK_ERR_FAULT );
+
+ /* Check restrictions on page alignment */
+ MALI_CHECK( 0 == (resource->base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->cpu_usage_adjust & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ mem_validator.phys_base = resource->base;
+ mem_validator.size = resource->size;
+ mem_validator.cpu_usage_adjust = resource->cpu_usage_adjust;
+ MALI_DEBUG_PRINT( 2, ("Memory Validator '%s' installed for Mali physical address base==0x%08X, size==0x%08X, cpu_adjust==0x%08X\n",
+ resource->description, mem_validator.phys_base, mem_validator.size, mem_validator.cpu_usage_adjust ));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size )
+{
+ u32 mali_phys_base;
+
+ mali_phys_base = *phys_base - mem_validator.cpu_usage_adjust;
+
+ MALI_CHECK( 0 == ( mali_phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( mali_phys_base, size ) );
+
+ *phys_base = mali_phys_base;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size )
+{
+ MALI_CHECK_GOTO( 0 == ( phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+ MALI_CHECK_GOTO( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+
+ if ( phys_base >= mem_validator.phys_base
+ && (phys_base + size) >= mem_validator.phys_base
+ && phys_base <= (mem_validator.phys_base + mem_validator.size)
+ && (phys_base + size) <= (mem_validator.phys_base + mem_validator.size) )
+ {
+ MALI_SUCCESS;
+ }
+
+ failure:
+ MALI_PRINTF( ("*******************************************************************************\n") );
+ MALI_PRINTF( ("MALI PHYSICAL RANGE VALIDATION ERROR!\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("We failed to validate a Mali-Physical range that the user-side wished to map in\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("It is likely that the user-side wished to do Direct Rendering, but a suitable\n") );
+ MALI_PRINTF( ("address range validation mechanism has not been correctly setup\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_base, size) );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("Please refer to the ARM Mali Software Integration Guide for more information.\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("*******************************************************************************\n") );
+
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler)
+{
+ MALI_CHECK(type < RESOURCE_TYPE_COUNT, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL == resource_handler[type]); /* A handler for resource already exists */
+ resource_handler[type] = handler;
+ MALI_SUCCESS;
+}
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session_data, int id)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ if(id >= SUBSYSTEMS_COUNT) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: id %d out of range\n", id)); return NULL; }
+
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: got NULL session data\n")); return NULL; }
+ return session_data->subsystem_data[id];
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ int i;
+ _mali_osk_errcode_t err;
+ struct mali_session_data * session_data;
+
+ /* allocated struct to track this session */
+ session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));
+
+ /* create a response queue for this session */
+ session_data->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session_data->ioctl_queue)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* call session_begin on all subsystems */
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->session_begin)
+ {
+ /* subsystem has a session_begin */
+ err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
+ MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
+ }
+ }
+
+ *context = (void*)session_data;
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ MALI_SUCCESS;
+
+cleanup:
+ MALI_DEBUG_PRINT(2, ("Session startup failed\n"));
+ /* i is the index of the subsystem whose session_begin failed; all subsystems before that index have to be ended */
+ /* end subsystem sessions in the reverse order they were started in */
+ for (i = i - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ /* return what the subsystem which failed session start returned */
+ MALI_ERROR(err);
+}
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ int i;
+ struct mali_session_data * session_data;
+
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(2, ("Session ending\n"));
+
+ /* end subsystem sessions in the reverse order they were started in */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+ MALI_SUCCESS;
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_up(queue_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ if( !queue_only )
+ {
+ /* Enable L2 cache due to power up */
+ mali_kernel_l2_cache_do_enable();
+
+ /* Invalidate the cache on power up */
+ MALI_DEBUG_PRINT(5, ("L2 Cache: Invalidate all\n"));
+ MALI_CHECK_NO_ERROR(mali_kernel_l2_cache_invalidate_all());
+ }
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(0, queue_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(1, queue_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(2, queue_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(3, queue_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power up: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_down(immediate_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ /* Nothing to do */
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(0, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(1, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(2, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(3, immediate_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power down: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
+
+
+
+#if MALI_STATE_TRACKING
+void _mali_kernel_core_dump_state(void)
+{
+ int i;
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->dump_state)
+ {
+ subsystems[i]->dump_state();
+ }
+ }
+#if USING_MALI_PMM
+ mali_pmm_dump_os_thread_state();
+#endif
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h
new file mode 100644
index 00000000000..3819ecc5e1c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_ukk.h"
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#endif
+
+_mali_osk_errcode_t mali_kernel_constructor( void );
+void mali_kernel_destructor( void );
+
+/**
+ * @brief Translate CPU physical to Mali physical addresses.
+ *
+ * This function is used to convert CPU physical addresses to Mali Physical
+ * addresses, such that _mali_ukk_map_external_mem may be used to map them
+ * into Mali. This will be used by _mali_ukk_va_to_mali_pa.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully translated.
+ *
+ * If a more complex, or non-static translation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a translation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param[in,out] phys_base pointer to the page-aligned base address of the
+ * physical range to be translated
+ *
+ * @param[in] size size of the address range to be translated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return _MALI_OSK_ERR_OK on success, with *phys_base translated. If the
+ * CPU physical address range is not within the valid range, a suitable
+ * _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size );
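+
+/*
+ * Usage sketch (illustrative only; cpu_phys_base and buf_size are placeholder
+ * names for a page-aligned, physically contiguous CPU buffer):
+ *
+ *   u32 mali_phys = cpu_phys_base;
+ *   if (_MALI_OSK_ERR_OK == mali_kernel_core_translate_cpu_to_mali_phys_range( &mali_phys, buf_size ))
+ *   {
+ *       mali_phys now holds the Mali physical base to hand to _mali_ukk_map_external_mem
+ *   }
+ */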
+
+
+/**
+ * @brief Validate a Mali physical address range.
+ *
+ * This function is used to ensure that an address range passed to
+ * _mali_ukk_map_external_mem is allowed to be mapped into Mali.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully validated.
+ *
+ * If a more complex, or non-static validation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a validation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param phys_base page-aligned base address of the Mali physical range to be
+ * validated.
+ *
+ * @param size size of the address range to be validated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return _MALI_OSK_ERR_OK if the Mali physical range is valid. Otherwise, a
+ * suitable _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size );
+
+#if USING_MALI_PMM
+/**
+ * @brief Signal a power up on a Mali core.
+ *
+ * This function flags a core as powered up.
+ * For PP and GP cores it calls functions that move the core from a power off
+ * queue into the idle queue ready to run jobs. It also tries to schedule any
+ * pending jobs to run on it.
+ *
+ * This function will fail if the core is not powered off - either running or
+ * already idle.
+ *
+ * @param core The PMM core id to power up.
+ * @param queue_only When MALI_TRUE only re-queue the core - do not reset.
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered up. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only );
+
+/**
+ * @brief Signal a power down on a Mali core.
+ *
+ * This function flags a core as powered down.
+ * For PP and GP cores it calls functions that move the core from an idle
+ * queue into the power off queue.
+ *
+ * This function will fail if the core is not idle - either running or
+ * already powered down.
+ *
+ * @param core The PMM core id to power down.
+ * @param immediate_only Do not set the core to pending power down if it can't
+ * power down immediately
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered down. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only );
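+
+/*
+ * Usage sketch (illustrative only): the PMM calls these once a core has
+ * actually been powered, e.g.
+ *
+ *   mali_core_signal_power_up( MALI_PMM_CORE_GP, MALI_FALSE );
+ *
+ * and, before removing power from an idle core,
+ *
+ *   mali_core_signal_power_down( MALI_PMM_CORE_GP, MALI_FALSE );
+ *
+ * (MALI_FALSE is assumed to be the usual mali_bool false value.)
+ */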
+
+#endif
+
+/**
+ * Flag to indicate whether or not mali_benchmark is turned on.
+ */
+extern int mali_benchmark;
+
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000..45c280edd6a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
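+
+/* MALI_PAD_INT rounds up to the next multiple of BITS_PER_LONG, so the usage
+ * bitmaps below are always a whole number of longs; with BITS_PER_LONG == 32,
+ * MALI_PAD_INT(1) == 32, MALI_PAD_INT(33) == 64 and MALI_PAD_INT(64) == 64. */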
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+#if !USING_MMU
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 20);
+#else
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 116);
+#endif
+ if (NULL != map->lock)
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term(map->lock);
+ _mali_osk_free(map);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ int new_descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (new_descriptor == map->current_nr_mappings)
+ {
+ /* no free descriptor, try to expand the table */
+ mali_descriptor_table * new_table, * old_table;
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+ map->current_nr_mappings += BITS_PER_LONG;
+ new_table = descriptor_table_alloc(map->current_nr_mappings);
+ if (NULL == new_table) goto unlock_and_exit;
+
+ old_table = map->table;
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+ map->table->mappings[new_descriptor] = target;
+ *odescriptor = new_descriptor;
+ err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ /* id 0 is skipped as it's a reserved ID not mapping to anything */
+ for (i = 1; i < map->current_nr_mappings; ++i)
+ {
+ if (_mali_osk_test_bit(i, map->table->usage))
+ {
+ callback(i, map->table->mappings[i]);
+ }
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = _MALI_OSK_ERR_OK;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = _MALI_OSK_ERR_OK;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+ mali_descriptor_table * table;
+
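+ /* A single allocation holds the table header, then the usage bitmap
+ * (count bits, i.e. (sizeof(unsigned long) * count) / BITS_PER_LONG bytes;
+ * count is always a multiple of BITS_PER_LONG here), then count mapping pointers. */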
+ table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000..f626aa59455
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries, growable up to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Maximum number of entries to support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @param descriptor Pointer which receives the allocated descriptor ID
+ * @return 0 on success, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
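+
+/*
+ * Usage sketch (illustrative only; my_object is a placeholder pointer):
+ *
+ *   int descriptor;
+ *   void * obj;
+ *   mali_descriptor_mapping * map = mali_descriptor_mapping_create(64, 4096);
+ *   if (NULL == map) return;
+ *   if (_MALI_OSK_ERR_OK == mali_descriptor_mapping_allocate_mapping(map, my_object, &descriptor))
+ *   {
+ *       mali_descriptor_mapping_get(map, descriptor, &obj);     obj is now my_object
+ *       mali_descriptor_mapping_free(map, descriptor);
+ *   }
+ *   mali_descriptor_mapping_destroy(map);
+ */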
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h
new file mode 100644
index 00000000000..c2467edc677
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_GP2_H__
+#define __MALI_KERNEL_GP2_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_gp2;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only );
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_GP2_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c
new file mode 100644
index 00000000000..fded5c93ab9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_kernel_l2_cache.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0002,
+ /*unused = 0x0003 */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0004, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0005,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0006, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x0007, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0008,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0009,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x000A,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x000B,
+} mali_l2_cache_register;
+
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the values that can be written to the Mali L2 cache enable register
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_l2_cache_core
+{
+ unsigned long base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple cache cores into a list */
+ _mali_osk_lock_t *lock; /**< Serialize all L2 cache commands */
+} mali_kernel_l2_cache_core;
+
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/**
+ * Mali L2 cache subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ *
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * L2 cache subsystem complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem's notification handler for a Mali L2 cache resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each Mali L2 cache described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MALI400L2)
+ * @return 0 if the Mali L2 cache was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource);
+
+/**
+ * Write to a L2 cache register
+ * Writes the given value to the specified register
+ * @param unit The L2 cache to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val);
+
+
+
+/**
+ * Invalidate specified L2 cache
+ * @param cache The L2 cache to invalidate
+ * @return 0 if Mali L2 cache was successfully invalidated, otherwise error
+ */
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache);
+
+
+/*
+ The fixed Mali L2 cache system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_l2_cache =
+{
+ mali_l2_cache_initialize, /**< startup */
+ mali_l2_cache_terminate, /**< shutdown */
+ mali_l2_cache_load_complete, /**< load_complete */
+ NULL, /**< system_info_fill */
+ NULL, /**< session_begin */
+ NULL, /**< session_end */
+ NULL, /**< broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /**< dump_state */
+#endif
+};
+
+
+
+static _MALI_OSK_LIST_HEAD(caches_head);
+
+
+
+
+/* called during module init */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_IGNORE( id );
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system initializing\n"));
+
+ _MALI_OSK_INIT_LIST_HEAD(&caches_head);
+
+ /* This will register the function for adding Mali L2 cache cores to the subsystem */
+ err = _mali_kernel_core_register_resource_handler(MALI400L2, mali_l2_cache_core_create);
+
+ MALI_ERROR(err);
+}
+
+
+
+/* called if/when our module is unloaded */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system terminating\n"));
+
+ /* loop over all L2 cache units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list )
+ {
+ /* reset to defaults */
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+ /* remove from the list of caches on the system */
+ _mali_osk_list_del( &cache->list );
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers );
+ _mali_osk_mem_unreqregion( cache->base, cache->mapping_size );
+ _mali_osk_lock_term( cache->lock );
+ _mali_osk_free( cache );
+
+ #if USING_MALI_PMM
+ /* Unregister the L2 cache with the PMM */
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+ #endif
+ }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT ;
+ mali_kernel_l2_cache_core * cache = NULL;
+
+ MALI_DEBUG_PRINT(2, ( "Creating Mali L2 cache: %s\n", resource->description));
+
+#if USING_MALI_PMM
+ /* Register the L2 cache with the PMM */
+ err = malipmm_core_register( MALI_PMM_CORE_L2 );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to register L2 cache unit with PMM"));
+ return err;
+ }
+#endif
+
+ err = _mali_osk_mem_reqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE, resource->description);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup_requestmem_failed);
+
+ /* Reset error that might be passed out */
+ err = _MALI_OSK_ERR_FAULT;
+
+ cache = _mali_osk_malloc(sizeof(mali_kernel_l2_cache_core));
+
+ MALI_CHECK_GOTO( NULL != cache, err_cleanup);
+
+ cache->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 104 );
+
+ MALI_CHECK_GOTO( NULL != cache->lock, err_cleanup);
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&cache->list);
+
+ cache->base = resource->base;
+ cache->mapping_size = MALI400_L2_CACHE_REGISTERS_SIZE;
+
+ /* map the registers */
+ cache->mapped_registers = _mali_osk_mem_mapioregion( cache->base, cache->mapping_size, resource->description );
+
+ MALI_CHECK_GOTO( NULL != cache->mapped_registers, err_cleanup);
+
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ err = mali_kernel_l2_cache_invalidate_all_cache(cache);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup);
+
+ /* add to our list of L2 caches */
+ _mali_osk_list_add( &cache->list, &caches_head );
+
+ MALI_SUCCESS;
+
+err_cleanup:
+ /* This cleanup is used when the resources have been requested successfully */
+
+ if ( NULL != cache )
+ {
+ if (NULL != cache->mapped_registers)
+ {
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to map Mali L2 cache registers at 0x%08lX\n", cache->base));
+ }
+
+ if( NULL != cache->lock )
+ {
+ _mali_osk_lock_term( cache->lock );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate a lock for handling a L2 cache unit"));
+ }
+
+ _mali_osk_free( cache );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate memory for handling a L2 cache unit"));
+ }
+
+ /* The region was requested successfully above, so this must always be reversed */
+ _mali_osk_mem_unreqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE);
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+err_cleanup_requestmem_failed:
+ MALI_DEBUG_PRINT(1, ("Failed to request Mali L2 cache '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI400_L2_CACHE_REGISTERS_SIZE - 1) );
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+}
+
+
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val)
+{
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
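+
+/* The mali_l2_cache_register values are 32-bit word indices, so the byte
+ * offset accessed is reg * sizeof(u32); e.g. MALI400_L2_CACHE_REGISTER_COMMAND
+ * (0x0004) sits at byte offset 0x10 within the 0x30 byte register block. */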
+
+
+static u32 mali_l2_cache_register_read(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg)
+{
+ return _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+}
+
+void mali_kernel_l2_cache_do_enable(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and enable them*/
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+ }
+}
+
+
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_do_enable();
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system load complete\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_send_command(mali_kernel_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ _mali_osk_lock_wait(cache->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++)
+ {
+ if (!(_mali_osk_mem_ioread32(cache->mapped_registers , (u32)MALI400_L2_CACHE_REGISTER_STATUS * sizeof(u32)) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+ {
+ break;
+ }
+ }
+
+ if (i == loop_count)
+ {
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+
+ /* then issue the command */
+ mali_l2_cache_register_write(cache, reg, val);
+
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_all_cache(cache) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page_cache(mali_kernel_l2_cache_core *cache, u32 page)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, page);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_page_cache(cache, page) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int reset0 = force_reset;
+ int reset1 = force_reset;
+ MALI_DEBUG_CODE(
+ int changed0 = 0;
+ int changed1 = 0;
+ )
+
+ /* loop over all L2 cache units and activate the counters on them */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+
+ if (src0 != cur_src0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, src0);
+ MALI_DEBUG_CODE(changed0 = 1;)
+ reset0 = 1;
+ }
+
+ if (src1 != cur_src1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, src1);
+ MALI_DEBUG_CODE(changed1 = 1;)
+ reset1 = 1;
+ }
+
+ if (reset0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+ }
+
+ if (reset1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters set: SRC0=%u, CHANGED0=%d, RESET0=%d, SRC1=%u, CHANGED1=%d, RESET1=%d\n",
+ src0, changed0, reset0,
+ src1, changed1, reset1));
+ }
+}
+
+
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int first_time = 1;
+ *src0 = 0;
+ *src1 = 0;
+ *val0 = 0;
+ *val1 = 0;
+
+ /* loop over all L2 cache units and read the counters */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+ u32 cur_val0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ u32 cur_val1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters get: SRC0=%u, VAL0=%u, SRC1=%u, VAL1=%u\n", cur_src0, cur_val0, cur_src1, cur_val1));
+
+ if (first_time)
+ {
+ *src0 = cur_src0;
+ *src1 = cur_src1;
+ first_time = 0;
+ }
+
+ if (*src0 == cur_src0 && *src1 == cur_src1)
+ {
+ *val0 += cur_val0;
+ *val1 += cur_val1;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Warning: Mali L2 caches has different performance counters set, not retrieving data\n"));
+ }
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h
new file mode 100644
index 00000000000..de3229a0688
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_l2_cache;
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void);
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page);
+
+void mali_kernel_l2_cache_do_enable(void);
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset);
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1);
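+
+/*
+ * Usage sketch (illustrative only; src0/src1 are hardware counter source IDs
+ * defined by the L2 cache TRM):
+ *
+ *   mali_kernel_l2_cache_set_perf_counters(src0, src1, 1);
+ *   run the GPU work to be measured
+ *   mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+ *
+ * mali_kernel_l2_cache_invalidate_all() drops the whole cache, as done on
+ * core power up; mali_kernel_l2_cache_invalidate_page() clears a single page.
+ */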
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h
new file mode 100644
index 00000000000..681b6dd47c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_H__
+#define __MALI_KERNEL_MEM_H__
+
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_memory;
+
+#endif /* __MALI_KERNEL_MEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c
new file mode 100644
index 00000000000..8277fd1e1c6
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c
@@ -0,0 +1,1425 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_session_manager.h"
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#ifdef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#include "mali_osk_indir_mmap.h"
+#endif
+
+/**
+ * Minimum memory allocation size
+ */
+#define MIN_BLOCK_SIZE (1024*1024UL)
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 4096
+
+/**
+ * Enums used to store multiple fields in one u32 to keep the memory block struct small
+ */
+enum MISC_SHIFT { MISC_SHIFT_FREE = 0, MISC_SHIFT_ORDER = 1, MISC_SHIFT_TOPLEVEL = 6 };
+enum MISC_MASK { MISC_MASK_FREE = 0x01, MISC_MASK_ORDER = 0x1F, MISC_MASK_TOPLEVEL = 0x1F };
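+
+/* The shift/mask pairs above select three fields packed into a block's misc
+ * word: the free flag in bit 0, the buddy order in bits 1..5 and the toplevel
+ * order in bits 6..10, e.g. order = (misc >> MISC_SHIFT_ORDER) & MISC_MASK_ORDER. */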
+
+/* forward declaration of the block struct */
+struct mali_memory_block;
+
+/**
+ * Definition of memory bank type.
+ * Represents a memory bank (separate address space)
+ * Each bank keeps track of its block usage.
+ * A buddy system used to track the usage
+*/
+typedef struct mali_memory_bank
+{
+ _mali_osk_list_t list; /* links multiple banks together */
+ _mali_osk_lock_t *lock;
+ u32 base_addr; /* Mali seen address of bank */
+ u32 cpu_usage_adjust; /* Adjustment factor for what the CPU sees */
+ u32 size; /* the effective size */
+ u32 real_size; /* the real size of the bank, as given to the subsystem */
+ int min_order;
+ int max_order;
+ struct mali_memory_block * blocklist;
+ _mali_osk_list_t *freelist;
+ _mali_osk_atomic_t num_active_allocations;
+ u32 used_for_flags;
+ u32 alloc_order; /**< Order in which the bank will be used for allocations */
+ const char *name; /**< Descriptive name of the bank */
+} mali_memory_bank;
+
+/**
+ * Definition of the memory block type
+ * Represents a memory block, which is the smallest memory unit operated on.
+ * A block keeps info about its mapping, if in use by a user process
+ */
+typedef struct mali_memory_block
+{
+ _mali_osk_list_t link; /* used for freelist and process usage list*/
+ mali_memory_bank * bank; /* the bank it belongs to */
+ void __user * mapping; /* possible user space mapping of this block */
+ u32 misc; /* used while a block is free to track the number of blocks it represents */
+ int descriptor;
+ u32 mmap_cookie; /**< necessary for interaction with _mali_ukk_mem_mmap/munmap */
+} mali_memory_block;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the head of the list of memory currently in use by a session.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t memory_head; /* List of the memory blocks used by this session. */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+} memory_session;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * Buddy block memory subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory systems ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Reports on the memory resources registered
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+
+/**
+ * Buddy block memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @param queue The user space event sink
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * Buddy block memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * Buddy block memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+/**
+ * Buddy block memory subsystem's notification handler for MEMORY resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each memory bank described in the active architecture's config.h file.
+ * Requests memory region ownership and calls backend.
+ * @param resource The resource to handle (type MEMORY)
+ * @return 0 if the memory was claimed and accepted, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+/* ioctl command implementations */
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_GET_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_GET_BIG_BLOCK command is received.
+ * Finds an available memory block and maps into the current process' address space.
+ * @param ukk_private private word for use by the User/Kernel interface
+ * @param session_data Pointer to the per-session object which will track the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_FREE_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_FREE_BIG_BLOCK command is received.
+ * Unmaps the memory from the process' address space and marks the block as free.
+ * @param session_data Pointer to the per-session object which tracks the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+
+/* this static version allows us to make use of it while holding the memory_session lock.
+ * This is required for the session_end code */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args);
+
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's memory bank registration routine
+ * Called when a MEMORY resource has been found.
+ * The memory region has already been reserved for use by this driver.
+ * Create a bank object to represent this region and initialize its slots.
+ * @note Can only be called in a module atomic scope, i.e. during module init since no locking is performed
+ * @param phys_base Physical base address of this bank
+ * @param cpu_usage_adjust Adjustment factor for CPU seen address
+ * @param size Size of the bank in bytes
+ * @param flags Memory type bits
+ * @param alloc_order Order in which the bank will be used for allocations
+ * @param name descriptive name of the bank
+ * @return Zero on success, negative on error
+ */
+static int mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name);
+
+/**
+ * Get a block of mali memory of at least the given size and of the given type
+ * This is the backend for get_big_block.
+ * @param type_id The type id of memory requested.
+ * @param minimum_size The size requested
+ * @return Pointer to a block on success, NULL on failure
+ */
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size);
+
+/**
+ * Get the mali seen address of the memory described by the block
+ * @param block The memory block to return the address of
+ * @return The mali seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block);
+
+/**
+ * Get the cpu seen address of the memory described by the block
+ * The cpu_usage_adjust is applied to the mali seen physical address to produce the cpu seen address
+ * @param block The memory block to return the address of
+ * @return The cpu seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block);
+
+/**
+ * Get the size of the memory described by the given block
+ * @param block The memory block to return the size of
+ * @return The size of the memory block described by the object
+ */
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block);
+
+/**
+ * Get the user space accessible mapping of the memory described by the given memory block
+ * Returns a pointer in user space to the memory, if one has been created.
+ * @param block The memory block to return the mapping of
+ * @return User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block);
+
+/**
+ * Set the user space accessible mapping of the memory described by the given memory block.
+ * Sets the stored pointer to user space for the memory described by this block.
+ * @param block The memory block to set mapping info for
+ * @param ptr User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr);
+
+/**
+ * Get the cookie for use with _mali_ukk_mem_munmap().
+ * @param block The memory block to get the cookie from
+ * @return the cookie. A return of 0 is still a valid cookie.
+ */
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block);
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie);
+
+
+/**
+ * Get a memory block's free status
+ * @param block The block to get the state of
+ * @return Non-zero if the block is free, zero if it is in use
+ */
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block);
+
+/**
+ * Set a memory block's free status
+ * @param block The block to set the state for
+ * @param state The state to set
+ */
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state);
+
+/**
+ * Set a memory block's order
+ * @param block The block to set the order for
+ * @param order The order to set
+ */
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order);
+
+/**
+ * Get a memory block's order
+ * @param block The block to get the order for
+ * @return The order this block exists on
+ */
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block);
+
+/**
+ * Tag a block as being a toplevel block.
+ * A toplevel block has no buddy and no parent
+ * @param block The block to tag as being toplevel
+ * @param level The order at which this block is toplevel (its own order)
+ */
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level);
+
+/**
+ * Check if a block is a toplevel block
+ * @param block The block to check
+ * @return 1 if toplevel, 0 otherwise
+ */
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block);
+
+/**
+ * Checks that the given block is free and of the given order, i.e. that it is a valid buddy for merging
+ * @param block The block to check
+ * @param order The order to check against
+ * @return 0 if not valid, else 1
+ */
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order);
+
+/*
+ The buddy system uses the following rules to quickly find a block's buddy
+ and parent (the block representing this block at a higher order level):
+ - Given a block with index i, the block's buddy is at index i ^ (1 << order)
+ - Given a block with index i, the block's parent is at index i & ~(1 << order)
+*/
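+/*
+ Worked example of the rules above (illustration only): for a block at index 6
+ considered at order 1, the buddy index is 6 ^ (1 << 1) = 4 and the parent
+ index is 6 & ~(1 << 1) = 4, i.e. the lower-indexed of the two buddies always
+ represents the merged block one order up.
+*/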
+
+/**
+ * Get a block's buddy
+ * @param block The block to find the buddy for
+ * @param order The order to operate on
+ * @return Pointer to the buddy block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order);
+
+/**
+ * Get a block's parent
+ * @param block The block to find the parent for
+ * @param order The order to operate on
+ * @return Pointer to the parent block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order);
+
+/**
+ * Release mali memory
+ * Backend for free_big_block.
+ * Will release the mali memory described by the given block struct.
+ * @param block Memory block to free
+ */
+static void block_release(mali_memory_block * block);
+
+/* end interface implementation */
+
+/**
+ * List of all the memory banks registered with the subsystem.
+ * Access to this list is NOT synchronized since it's only
+ * written to during module init and termination.
+ */
+static _MALI_OSK_LIST_HEAD(memory_banks_list);
+
+/*
+ The buddy memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ mali_memory_core_terminate, /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = -1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&memory_banks_list);
+
+ mali_subsystem_memory_id = id;
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_memory));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga));
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ /* loop over all memory banks to free them */
+ /* we use the safe version since we delete the current bank in the body */
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ MALI_DEBUG_CODE(int usage_count = _mali_osk_atomic_read(&bank->num_active_allocations));
+ /*
+ Report leaked memory
+ If this happens we have a bug in our session cleanup code.
+ */
+ MALI_DEBUG_PRINT_IF(1, 0 != usage_count, ("%d allocation(s) from memory bank at 0x%X still in use\n", usage_count, bank->base_addr));
+
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+
+ _mali_osk_lock_term(bank->lock);
+
+ /* unlink from bank list */
+ _mali_osk_list_del(&bank->list);
+
+ /* release kernel resources used by the bank */
+ _mali_osk_mem_unreqregion(bank->base_addr, bank->real_size);
+
+ /* remove all resources used to represent this bank*/
+ _mali_osk_free(bank->freelist);
+ _mali_osk_free(bank->blocklist);
+
+ /* destroy the bank object itself */
+ _mali_osk_free(bank);
+ }
+
+ /* No need to de-initialize mali_subsystem_memory_id - it could only be
+ * re-initialized to the same value */
+}
+
+/* load_complete handler */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest number first) :\n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ if ( NULL != bank->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", bank->alloc_order, bank->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", bank->alloc_order ) );
+ }
+ }
+ MALI_SUCCESS;
+}
+
+MALI_STATIC_INLINE u32 order_needed_for_size(u32 size, struct mali_memory_bank * bank)
+{
+ u32 order = 0;
+
+ if (0 < size)
+ {
+ for ( order = sizeof(u32)*8 - 1; ((1UL<<order) & size) == 0; --order)
+ /* nothing */;
+
+ /* check if size is pow2, if not we need increment order by one */
+ if (0 != (size & ((1UL<<order)-1))) ++order;
+ }
+
+ if ((NULL != bank) && (order < bank->min_order)) order = bank->min_order;
+ /* Deliberately not capped to the bank's max order; an over-sized request simply finds no free block in the caller */
+
+ return order;
+}
+
+MALI_STATIC_INLINE u32 maximum_order_which_fits(u32 size)
+{
+ u32 order = 0;
+ u32 powsize = 1;
+ while (powsize < size)
+ {
+ powsize <<= 1;
+ if (powsize > size) break;
+ order++;
+ }
+
+ return order;
+}
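+/*
+ Worked example for the two helpers above (illustration only): for a request of
+ 0x3000 bytes (12 KiB), order_needed_for_size() finds the highest set bit at
+ order 13 (8 KiB), sees that 0x3000 is not a power of two and rounds up to
+ order 14 (16 KiB); maximum_order_which_fits(0x3000), in contrast, returns 13,
+ the largest power-of-two size (8 KiB) that fits inside the given size.
+*/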
+
+/* called for new MEMORY resources */
+static _mali_osk_errcode_t mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name)
+{
+ /* no locking performed due to function contract */
+ int i;
+ u32 left, offset;
+ mali_memory_bank * bank;
+ mali_memory_bank * bank_enum, *temp;
+
+ _mali_osk_errcode_t err;
+
+ /* Only a multiple of MIN_BLOCK_SIZE is usable */
+ u32 usable_size = size & ~(MIN_BLOCK_SIZE - 1);
+
+ /* handle zero sized banks and bank smaller than the fixed block size */
+ if (0 == usable_size)
+ {
+ MALI_PRINT(("Usable size == 0\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ /* warn for banks not a multiple of the block size */
+ MALI_DEBUG_PRINT_IF(1, usable_size != size, ("Memory bank @ 0x%X not a multiple of minimum block size. %d bytes wasted\n", phys_base, size - usable_size));
+
+ /* check against previous registrations */
+ MALI_DEBUG_CODE(
+ {
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ /* duplicate ? */
+ if (bank->base_addr == phys_base)
+ {
+ MALI_PRINT(("Duplicate registration of a memory bank at 0x%X detected\n", phys_base));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ /* overlapping ? */
+ else if (
+ ( (phys_base > bank->base_addr) && (phys_base < (bank->base_addr + bank->real_size)) ) ||
+ ( (phys_base + size) > bank->base_addr && ((phys_base + size) < (bank->base_addr + bank->real_size)) )
+ )
+ {
+ MALI_PRINT(("Overlapping memory blocks found. Memory at 0x%X overlaps with memory at 0x%X size 0x%X\n", bank->base_addr, phys_base, size));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ }
+ );
+
+ /* create an object to represent this memory bank */
+ MALI_CHECK_NON_NULL(bank = (mali_memory_bank*)_mali_osk_malloc(sizeof(mali_memory_bank)), _MALI_OSK_ERR_NOMEM);
+
+ /* init the fields */
+ _MALI_OSK_INIT_LIST_HEAD(&bank->list);
+ bank->base_addr = phys_base;
+ bank->cpu_usage_adjust = cpu_usage_adjust;
+ bank->size = usable_size;
+ bank->real_size = size;
+ bank->alloc_order = alloc_order;
+ bank->name = name;
+
+ err = _mali_osk_atomic_init(&bank->num_active_allocations, 0);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_free(bank);
+ MALI_ERROR(err);
+ }
+
+ bank->used_for_flags = flags;
+ bank->min_order = order_needed_for_size(MIN_BLOCK_SIZE, NULL);
+ bank->max_order = maximum_order_which_fits(usable_size);
+ bank->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == bank->lock)
+ {
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ bank->blocklist = _mali_osk_calloc(1, sizeof(struct mali_memory_block) * (usable_size / MIN_BLOCK_SIZE));
+ if (NULL == bank->blocklist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (usable_size / MIN_BLOCK_SIZE); i++)
+ {
+ bank->blocklist[i].bank = bank;
+ }
+
+ bank->freelist = _mali_osk_calloc(1, sizeof(_mali_osk_list_t) * (bank->max_order - bank->min_order + 1));
+ if (NULL == bank->freelist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_free(bank->blocklist);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (bank->max_order - bank->min_order + 1); i++) _MALI_OSK_INIT_LIST_HEAD(&bank->freelist[i]);
+
+ /* init slot info */
+ for (offset = 0, left = usable_size; offset < (usable_size / MIN_BLOCK_SIZE); /* updated inside the body */)
+ {
+ u32 block_order;
+ mali_memory_block * block;
+
+ /* the maximum order which fits in the remaining area */
+ block_order = maximum_order_which_fits(left);
+
+ /* find the block pointer */
+ block = &bank->blocklist[offset];
+
+ /* tag the block as being toplevel */
+ set_block_toplevel(block, block_order);
+
+ /* tag it as being free */
+ set_block_free(block, 1);
+
+ /* set the order */
+ set_block_order(block, block_order);
+
+ _mali_osk_list_addtail(&block->link, bank->freelist + (block_order - bank->min_order));
+
+ left -= (1 << block_order);
+ offset += ((1 << block_order) / MIN_BLOCK_SIZE);
+ }
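+ /*
+ Illustration (assuming, purely for this example, a MIN_BLOCK_SIZE of 4 KiB):
+ a usable_size of 12 KiB is carved into one order-13 (8 KiB) toplevel block
+ followed by one order-12 (4 KiB) toplevel block, each added to the freelist
+ matching its order.
+ */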
+
+ /* add bank to list of banks on the system */
+ _MALI_OSK_LIST_FOREACHENTRY( bank_enum, temp, &memory_banks_list, mali_memory_bank, list )
+ {
+ if ( bank_enum->alloc_order >= alloc_order )
+ {
+ /* Found insertion point - our item must go before this one */
+ break;
+ }
+ }
+ _mali_osk_list_addtail(&bank->list, &bank_enum->list);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_memory_mmu_register(u32 type, u32 phys_base)
+{
+ /* not supported */
+ return _MALI_OSK_ERR_INVALID_FUNC;
+}
+
+void mali_memory_mmu_unregister(u32 phys_base)
+{
+ /* not supported */
+ return;
+}
+
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size)
+{
+ mali_memory_bank * bank;
+ mali_memory_block * block = NULL;
+ u32 requested_order, current_order;
+
+ /* input validation */
+ if (0 == minimum_size)
+ {
+ /* bad size */
+ MALI_DEBUG_PRINT(2, ("Zero size block requested by mali_memory_block_get\n"));
+ return NULL;
+ }
+
+ bank = (mali_memory_bank*)type_id;
+
+ requested_order = order_needed_for_size(minimum_size, bank);
+
+ MALI_DEBUG_PRINT(4, ("For size %d we need order %d (%d)\n", minimum_size, requested_order, 1 << requested_order));
+
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ MALI_DEBUG_PRINT(7, ("Bank 0x%x locked\n", bank));
+
+ for (current_order = requested_order; current_order <= bank->max_order; ++current_order)
+ {
+ _mali_osk_list_t * list = bank->freelist + (current_order - bank->min_order);
+ MALI_DEBUG_PRINT(7, ("Checking freelist 0x%x for order %d\n", list, current_order));
+ if (0 != _mali_osk_list_empty(list)) continue; /* empty list */
+
+ MALI_DEBUG_PRINT(7, ("Found an entry on the freelist for order %d\n", current_order));
+
+
+ block = _MALI_OSK_LIST_ENTRY(list->next, mali_memory_block, link);
+ _mali_osk_list_delinit(&block->link);
+
+ while (current_order > requested_order)
+ {
+ mali_memory_block * buddy_block;
+ MALI_DEBUG_PRINT(7, ("Splitting block 0x%x\n", block));
+ current_order--;
+ list--;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ set_block_order(buddy_block, current_order);
+ set_block_free(buddy_block, 1);
+ _mali_osk_list_add(&buddy_block->link, list);
+ }
+
+ set_block_order(block, current_order);
+ set_block_free(block, 0);
+
+ /* update usage count */
+ _mali_osk_atomic_inc(&bank->num_active_allocations);
+
+ break;
+ }
+
+ /* ! critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(7, ("Lock released for bank 0x%x\n", bank));
+
+ MALI_DEBUG_PRINT_IF(7, NULL != block, ("Block 0x%x allocated\n", block));
+
+ return block;
+}
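+/*
+ Split path illustration for mali_memory_block_get() above (not from the
+ original sources): if an order-12 block is requested but the lowest non-empty
+ freelist holds an order-14 block, that block is removed and split twice; an
+ order-13 buddy and then an order-12 buddy are placed back on their freelists,
+ and the remaining order-12 block is returned to the caller.
+*/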
+
+
+static void block_release(mali_memory_block * block)
+{
+ mali_memory_bank * bank;
+ u32 current_order;
+
+ if (NULL == block) return;
+
+ bank = block->bank;
+
+ /* we're manipulating the free list, so we need to lock it */
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ set_block_free(block, 1);
+ current_order = get_block_order(block);
+
+ while (current_order <= bank->max_order)
+ {
+ mali_memory_block * buddy_block;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ if (!block_is_valid_buddy(buddy_block, current_order)) break;
+ _mali_osk_list_delinit(&buddy_block->link); /* remove from free list */
+ /* clear tracked data in both blocks */
+ set_block_order(block, 0);
+ set_block_free(block, 0);
+ set_block_order(buddy_block, 0);
+ set_block_free(buddy_block, 0);
+ /* make the parent control the new state */
+ block = block_get_parent(block, current_order - bank->min_order);
+ set_block_order(block, current_order + 1); /* merged has a higher order */
+ set_block_free(block, 1); /* mark it as free */
+ current_order++;
+ if (get_block_toplevel(block) == current_order) break; /* stop the merge if we've arrived at a toplevel block */
+ }
+
+ _mali_osk_list_add(&block->link, &bank->freelist[current_order - bank->min_order]);
+
+ /* update bank usage statistics */
+ _mali_osk_atomic_dec(&block->bank->num_active_allocations);
+
+ /* !critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return;
+}
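+/*
+ Merge path illustration for block_release() above (not from the original
+ sources): freeing an order-12 block whose order-12 buddy is also free clears
+ both blocks' tracking bits, promotes the parent to order 13 and retries the
+ merge there, stopping when the buddy is not a free block of the same order or
+ when a toplevel block of the current order is reached; the resulting block is
+ then put back on the freelist matching its final order.
+*/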
+
+MALI_STATIC_INLINE u32 block_get_offset(mali_memory_block * block)
+{
+ return block - block->bank->blocklist;
+}
+
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return (block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block)) + block->bank->cpu_usage_adjust;
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block)
+{
+ if (NULL != block) return 1 << get_block_order(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mapping;
+ else return NULL;
+}
+
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr)
+{
+ if (NULL != block) block->mapping = ptr;
+}
+
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mmap_cookie;
+ else return 0;
+}
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie)
+{
+ if (NULL != block) block->mmap_cookie = cookie;
+}
+
+
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (NULL != *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ /* create the session data object */
+ MALI_CHECK_NON_NULL(session_data = _mali_osk_malloc(sizeof(memory_session)), _MALI_OSK_ERR_NOMEM);
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->memory_head); /* no memory in use */
+ session_data->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == session_data->lock)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ *slot = session_data; /* slot will point to our data object */
+
+ MALI_SUCCESS;
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ if (NULL == *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL memory_session found in current session object"));
+ return;
+ }
+
+ _mali_osk_lock_wait(((memory_session*)*slot)->lock, _MALI_OSK_LOCKMODE_RW);
+ session_data = (memory_session *)*slot;
+ /* clear our slot */
+ *slot = NULL;
+
+ /*
+ First free all memory still being used.
+ This can happen if the caller has leaked memory or
+ the application has crashed forcing an auto-session end.
+ */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_block * block, * temp;
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ /* use the _safe version since free_big_block removes the active block from the list we're iterating over */
+ _MALI_OSK_LIST_FOREACHENTRY(block, temp, &session_data->memory_head, mali_memory_block, link)
+ {
+ _mali_osk_errcode_t err;
+ _mali_uk_free_big_block_s uk_args;
+
+ MALI_DEBUG_PRINT(4, ("Freeing block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ block,
+ (void*)block_mali_addr_get(block),
+ block_size_get(block),
+ block_mapping_get(block))
+ );
+
+ /* free the block */
+ /** @note manual type safety check-point */
+ uk_args.ctx = mali_session_data;
+ uk_args.cookie = (u32)block->descriptor;
+ err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, &uk_args );
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT_ERROR(("_mali_ukk_free_big_block_internal() failed during session termination on block with cookie==0x%X\n",
+ uk_args.cookie)
+ );
+ }
+ }
+ }
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_term(session_data->lock);
+
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ return;
+}
+
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+ mali_memory_bank * bank, *temp;
+ _mali_mem_info **mem_info_tail;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+ MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+ mem_info_tail = &info->mem_info;
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ _mali_mem_info * mem_info;
+
+ mem_info = (_mali_mem_info *)_mali_osk_calloc(1, sizeof(_mali_mem_info));
+ if (NULL == mem_info) return _MALI_OSK_ERR_NOMEM; /* memory already allocated will be freed by the caller */
+
+ /* set info */
+ mem_info->size = bank->size;
+ mem_info->flags = (_mali_bus_usage)bank->used_for_flags;
+ mem_info->maximum_order_supported = bank->max_order;
+ mem_info->identifier = (u32)bank;
+
+ /* add to system info linked list */
+ (*mem_info_tail) = mem_info;
+ mem_info_tail = &mem_info->next;
+ }
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource)
+{
+ _mali_osk_errcode_t err;
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* call backend */
+ err = mali_memory_bank_register(resource->base, resource->cpu_usage_adjust, resource->size, resource->flags, resource->alloc_order, resource->description);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ /* if backend refused the memory we have to release the region again */
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(err);
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+ /* Not supported by the fixed block memory system */
+ MALI_DEBUG_PRINT(1, ("MMU resource not supported by non-MMU driver!\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_FUNC);
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ u32 data;
+ data = _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ data = _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) *2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framework '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t get_big_block(void * ukk_private, struct mali_session_data * mali_session_data, void __user * argument) */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ _mali_uk_mem_mmap_s args_mmap = {0, };
+ int md;
+ mali_memory_block * block;
+ _mali_osk_errcode_t err;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+ /** @note manual type safety check-point */
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (!args->type_id)
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* at least min block size */
+ if (MIN_BLOCK_SIZE > args->minimum_size_requested) args->minimum_size_requested = MIN_BLOCK_SIZE;
+
+ /* perform the actual allocation */
+ block = mali_memory_block_get(args->type_id, args->minimum_size_requested);
+ if ( NULL == block )
+ {
+ /* no memory available with requested type_id */
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, block, &md))
+ {
+ block_release(block);
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ block->descriptor = md;
+
+
+ /* fill in response */
+ args->mali_address = block_mali_addr_get(block);
+ args->block_size = block_size_get(block);
+ args->cookie = (u32)md;
+ args->flags = block->bank->used_for_flags;
+
+ /* map the block into the process' address space */
+
+ /** @note manual type safety check-point */
+ args_mmap.ukk_private = (void *)args->ukk_private;
+ args_mmap.ctx = args->ctx;
+ args_mmap.size = args->block_size;
+ args_mmap.phys_addr = block_cpu_addr_get(block);
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ err = _mali_ukk_mem_mmap( &args_mmap );
+#else
+ err = _mali_osk_specific_indirect_mmap( &args_mmap );
+#endif
+
+ /* check if the mapping failed */
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ("Memory mapping failed 0x%x\n", args->cpuptr));
+ /* mapping failed */
+
+ /* remove descriptor entry */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+
+ /* free the mali memory */
+ block_release(block);
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+
+ args->cpuptr = args_mmap.mapping;
+ block_mmap_cookie_set(block, args_mmap.cookie);
+ block_mapping_set(block, args->cpuptr);
+
+ MALI_DEBUG_PRINT(2, ("Mali memory 0x%x (size %d) mapped in process memory space at 0x%x\n", (void*)args->mali_address, args->block_size, args->cpuptr));
+
+ /* track memory in use for the session */
+ _mali_osk_list_addtail(&block->link, &session_data->memory_head);
+
+ /* memory assigned to the session, memory mapped into the process' view */
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_SUCCESS;
+}
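+/*
+ Added summary of the handler above (not from the original sources): under the
+ session lock it allocates a block, registers it in the session's descriptor
+ mapping, maps it into the calling process (directly or via the OS-specific
+ indirect mmap), and links the block onto the session's usage list; each
+ failure path releases whatever was acquired before returning.
+*/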
+
+/* Internal code that assumes the memory session lock is held */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args)
+{
+ mali_memory_block * block = NULL;
+ _mali_osk_errcode_t err;
+ _mali_uk_mem_munmap_s args_munmap = {0,};
+
+ MALI_DEBUG_ASSERT_POINTER( mali_session_data );
+ MALI_DEBUG_ASSERT_POINTER( session_data );
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ err = mali_descriptor_mapping_get(session_data->descriptor_mapping, (int)args->cookie, (void**)&block);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release memory pages\n", (int)args->cookie));
+ MALI_ERROR(err);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ MALI_DEBUG_PRINT(4, ("Asked to free block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ block,
+ (void*)block_mali_addr_get(block),
+ block_size_get(block),
+ block_mapping_get(block))
+ );
+
+ /** @note manual type safety check-point */
+ args_munmap.ctx = (void*)mali_session_data;
+ args_munmap.mapping = block_mapping_get( block );
+ args_munmap.size = block_size_get( block );
+ args_munmap.cookie = block_mmap_cookie_get( block );
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ _mali_ukk_mem_munmap( &args_munmap );
+#else
+ _mali_osk_specific_indirect_munmap( &args_munmap );
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Session data 0x%x, lock 0x%x\n", session_data, &session_data->lock));
+
+ /* unlink from session usage list */
+ MALI_DEBUG_PRINT(5, ("unlink from session usage list\n"));
+ _mali_osk_list_delinit(&block->link);
+
+ /* remove descriptor entry */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, (int)args->cookie);
+
+ /* free the mali memory */
+ block_release(block);
+ MALI_DEBUG_PRINT(5, ("Block freed\n"));
+
+ MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t free_big_block( struct mali_session_data * mali_session_data, void __user * argument) */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ _mali_osk_errcode_t err;
+ struct mali_session_data * mali_session_data;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+ /** @note manual type safety check-point */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ /* Must always verify this, since these are provided by the user */
+ MALI_CHECK_NON_NULL(mali_session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /** @note this has been separated out so that the session_end handler can call this while it has the memory_session lock held */
+ err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, args );
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return err;
+}
+
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_FREE) & MISC_MASK_FREE;
+}
+
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state)
+{
+ if (state) block->misc |= (MISC_MASK_FREE << MISC_SHIFT_FREE);
+ else block->misc &= ~(MISC_MASK_FREE << MISC_SHIFT_FREE);
+}
+
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order)
+{
+ block->misc &= ~(MISC_MASK_ORDER << MISC_SHIFT_ORDER);
+ block->misc |= ((order & MISC_MASK_ORDER) << MISC_SHIFT_ORDER);
+}
+
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_ORDER) & MISC_MASK_ORDER;
+}
+
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level)
+{
+ block->misc |= ((level & MISC_MASK_TOPLEVEL) << MISC_SHIFT_TOPLEVEL);
+}
+
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_TOPLEVEL) & MISC_MASK_TOPLEVEL;
+}
+
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order)
+{
+ if (get_block_free(block) && (get_block_order(block) == order)) return 1;
+ else return 0;
+}
+
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order)
+{
+ return block + ( (block_get_offset(block) ^ (1 << order)) - block_get_offset(block));
+}
+
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order)
+{
+ return block + ((block_get_offset(block) & ~(1 << order)) - block_get_offset(block));
+}
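+/*
+ Added note on the two helpers above: because blocks live in the bank's
+ contiguous blocklist array, adding the index difference to the block pointer
+ is equivalent to returning &block->bank->blocklist[offset ^ (1 << order)] for
+ the buddy and &block->bank->blocklist[offset & ~(1 << order)] for the parent,
+ where offset is block_get_offset(block).
+*/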
+
+/* This handler registered to mali_mmap for non-MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ _mali_osk_errcode_t ret;
+ struct mali_session_data * mali_session_data;
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+
+ ret = _mali_osk_mem_mapregion_init( descriptor );
+ if ( _MALI_OSK_ERR_OK != ret )
+ {
+ MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_init() failed\n"));
+ _mali_osk_free(descriptor);
+ MALI_ERROR(ret);
+ }
+
+ ret = _mali_osk_mem_mapregion_map( descriptor, 0, &descriptor->mali_address, descriptor->size );
+ if ( _MALI_OSK_ERR_OK != ret )
+ {
+ MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_map() failed\n"));
+ _mali_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ MALI_ERROR(ret);
+ }
+
+ args->mapping = descriptor->mapping;
+
+ /**
+ * @note we do not require use of mali_descriptor_mapping here:
+ * the cookie gets stored in the mali_memory_block struct, which itself is
+ * protected by mali_descriptor_mapping, and so this cookie never leaves
+ * kernel space (on any OS).
+ *
+ * In the MMU case, we must use a mali_descriptor_mapping, since on _some_
+ * OSs, the cookie leaves kernel space.
+ */
+ args->cookie = (u32)descriptor;
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_munmap for non-MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+
+ /** see note in _mali_ukk_mem_mmap() - no need to use descriptor mapping */
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* args->mapping and args->size are also discarded. They are only necessary for certain do_munmap implementations. However, they could be used to check the descriptor at this point. */
+ _mali_osk_mem_mapregion_unmap( descriptor, 0, descriptor->size, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_mem_mapregion_term( descriptor );
+
+ _mali_osk_free(descriptor);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c
new file mode 100644
index 00000000000..7a48e27ede7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c
@@ -0,0 +1,2924 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_mem_mmu.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from a PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
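+/*
+ Worked example of the lookup macros above (illustration only): for the Mali
+ virtual address 0x12345678, MALI_MMU_PDE_ENTRY() yields page directory index
+ 0x048 and MALI_MMU_PTE_ENTRY() yields page table index 0x345, leaving 0x678 as
+ the byte offset within the 4 KiB page.
+*/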
+
+/**
+ * Newer Linux kernel versions have deprecated SA_SHIRQ in favour of IRQF_SHARED.
+ * This fallback handles older kernels which haven't made that switch.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+/**
+ * Used to disallow more than one core from running an MMU at the same time
+ *
+ * @note This value is hardwired into some systems' configuration files,
+ * which \em might not be a header file (e.g. some external data configuration
+ * file). Therefore, if this value is modified, its occurrence must be
+ * \b manually checked for in the entire driver source tree.
+ */
+#define MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES 1
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * Page table entry (PDE/PTE) flag bits
+ */
+typedef enum mali_mmu_entry_flags
+{
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_MASK = 0x07
+} mali_mmu_entry_flags;
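+/*
+ Added illustration (assuming a descriptor entry packs the page address in its
+ upper bits alongside these flag bits, as MALI_MMU_ENTRY_ADDRESS() and the mask
+ above suggest): an entry value of 0x10002C07 would decode to physical address
+ 0x10002C00 with the present, read-permission and write-permission bits set.
+*/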
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0001, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0002, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x0003, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x004, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0005, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0006, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x0007, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0008 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_SOFT_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+typedef enum mali_mmu_status_bits
+{
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the pointer to the huge user space virtual memory area
+ * used to access the Mali memory.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting the vm manipulation */
+
+ u32 mali_base_address; /**< Mali virtual memory area used by this session */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+
+ _mali_osk_list_t active_mmus; /**< The MMUs in this session, in increasing order of ID (so we can lock them in the correct order when necessary) */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+} memory_session;
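+/*
+ Added note: the 1024-entry page_entries_mapped and page_entries_usage_count
+ arrays mirror the 1024 possible page directory entries (the 10-bit PDE index
+ above), each of which covers a 4 MiB window of the Mali virtual address space.
+*/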
+
+typedef struct mali_kernel_memory_mmu_idle_callback
+{
+ _mali_osk_list_t link;
+ void (*callback)(void*);
+ void * callback_argument;
+} mali_kernel_memory_mmu_idle_callback;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_memory_mmu
+{
+ int id; /**< ID of the MMU, no duplicate IDs may exist on the system */
+ const char * description; /**< Description text received from the resource manager to help identify the resource for people */
+ int irq_nr; /**< IRQ number */
+ u32 base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple MMU's into a list */
+ _mali_osk_irq_t *irq;
+ u32 flags; /**< Used to note if there is something special about this MMU. */
+
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the usage fields */
+ /* usage fields */
+ memory_session * active_session; /**< Active session, NULL if no session is active */
+ u32 usage_count; /**< Number of nested activations of the active session */
+ _mali_osk_list_t callbacks; /**< Callback registered for MMU idle notification */
+
+ int in_page_fault_handler;
+
+ _mali_osk_list_t session_link;
+} mali_kernel_memory_mmu;
+
+typedef struct dedicated_memory_info
+{
+ u32 base;
+ u32 size;
+ struct dedicated_memory_info * next;
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+typedef struct ump_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size_allocated;
+ ump_dd_handle ump_mem;
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size;
+} external_mem_allocation;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * MMU memory subsystem startup function.
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory system's ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU memory subsystem shutdown function.
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up the subsystem's resources.
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU Memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Builds the overall memory list
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * MMU memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * MMU memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+
+/**
+ * MMU memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * MMU memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource);
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource);
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ */
+static void _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+/**
+ * The MMU interrupt handler
+ * Upper half of the MMU interrupt processing.
+ * Called by the kernel when the MMU has triggered an interrupt.
+ * The interrupt function supports IRQ sharing, so it probes the MMU in question
+ * to determine whether it raised the interrupt
+ * @param data Points to the MMU object being handled
+ * @return An OSK error code; the interrupt is reported as handled when it originated from this MMU
+ */
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data);
+
+/**
+ * The MMU reset handler
+ * Bottom half of the MMU interrupt processing for page faults and bus errors
+ * @param data The item to operate on
+ */
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half ( void *data );
+
+/**
+ * Read MMU register value
+ * Reads the contents of the specified register.
+ * @param unit The MMU to read from
+ * @param reg The register to read
+ * @return The contents of the register
+ */
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg);
+
+/**
+ * Write to an MMU register
+ * Writes the given value to the specified register
+ * @param unit The MMU to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val);
+
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+static void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory);
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+void mali_mmu_page_table_cache_destroy(void);
+
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+void mali_mmu_release_table_page(u32 pa);
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void);
+
+static void mali_free_empty_page_directory(void);
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void);
+
+static void mali_free_fault_flush_pages(void);
+
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu);
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+ _mali_osk_list_t list;
+ u32 * usage_map;
+ u32 usage_count;
+ u32 num_pages;
+ mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t partial;
+ _mali_osk_list_t full;
+ /* we never hold on to an empty allocation */
+} mali_mmu_page_table_allocations;
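+
+/*
+ * Page table cache organization (summary of the code further down): an
+ * allocation with free pages left lives on the 'partial' list; once every
+ * page in it has been handed out it is moved to the 'full' list, and when
+ * its last page is returned the whole allocation is released again
+ * (see mali_mmu_get_table_page() and mali_mmu_release_table_page()).
+ */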
+
+/* Head of the list of MMUs */
+static _MALI_OSK_LIST_HEAD(mmu_head);
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+/*
+ The fixed memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ mali_memory_core_terminate, /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+ mali_address_manager_allocate, /* allocate */
+ mali_address_manager_release, /* release */
+ mali_address_manager_map, /* map_physical */
+ NULL /* unmap_physical not present*/
+};
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+ _mali_osk_mem_mapregion_init, /* allocate */
+ _mali_osk_mem_mapregion_term, /* release */
+ _mali_osk_mem_mapregion_map, /* map_physical */
+ _mali_osk_mem_mapregion_unmap /* unmap_physical */
+};
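+
+/*
+ * Two address managers are combined into one allocation engine below:
+ * mali_address_manager maintains the Mali-side MMU page tables, while
+ * process_address_manager maps the same allocation into the CPU/process
+ * address space via the OSK mapregion helpers. Only the process-side
+ * manager provides an unmap hook.
+ */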
+
+static mali_allocation_engine memory_engine = NULL;
+static mali_physical_memory_allocator * physical_memory_allocators = NULL;
+
+static dedicated_memory_info * mem_region_registrations = NULL;
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = (mali_kernel_subsystem_identifier)-1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(2, ("MMU memory system initializing\n"));
+
+ /* save our subsystem id for later for use in slot lookup during session activation */
+ mali_subsystem_memory_id = id;
+
+ _MALI_OSK_INIT_LIST_HEAD(&mmu_head);
+
+ MALI_CHECK_NO_ERROR( mali_mmu_page_table_cache_create() );
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_dedicated_memory) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(OS_MEMORY, mali_memory_core_resource_os_memory) );
+
+ memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+ MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_memory_mmu * mmu, *temp_mmu;
+
+ MALI_DEBUG_PRINT(2, ("MMU memory system terminating\n"));
+
+ /* loop over all MMU units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ /* reset to defaults */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+
+ /* unregister the irq */
+ _mali_osk_irq_term(mmu->irq);
+
+ /* remove from the list of MMUs on the system */
+ _mali_osk_list_del(&mmu->list);
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion(mmu->base, mmu->mapping_size, mmu->mapped_registers);
+ _mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
+ _mali_osk_free(mmu);
+ }
+
+ /* free global helper pages */
+ mali_free_empty_page_directory();
+ mali_free_fault_flush_pages();
+
+ /* destroy the page table cache before shutting down backends in case we have a page table leak to report */
+ mali_mmu_page_table_cache_destroy();
+
+ while ( NULL != mem_region_registrations)
+ {
+ dedicated_memory_info * m;
+ m = mem_region_registrations;
+ mem_region_registrations = m->next;
+ _mali_osk_mem_unreqregion(m->base, m->size);
+ _mali_osk_free(m);
+ }
+
+ while ( NULL != physical_memory_allocators)
+ {
+ mali_physical_memory_allocator * m;
+ m = physical_memory_allocators;
+ physical_memory_allocators = m->next;
+ m->destroy(m);
+ }
+
+ if (NULL != memory_engine)
+ {
+ mali_allocation_engine_destroy(memory_engine);
+ memory_engine = NULL;
+ }
+
+}
+
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ memory_session * session_data;
+ _mali_osk_errcode_t err;
+ int i;
+ mali_io_address pd_mapped;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (NULL != *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(2, ("MMU session begin\n"));
+
+ /* create the session data object */
+ session_data = _mali_osk_calloc(1, sizeof(memory_session));
+ MALI_CHECK_NON_NULL( session_data, _MALI_OSK_ERR_NOMEM );
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ err = mali_mmu_get_table_page(&session_data->page_directory, &pd_mapped);
+
+ session_data->page_directory_mapped = pd_mapped;
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(err);
+ }
+ MALI_DEBUG_ASSERT_POINTER( session_data->page_directory_mapped );
+
+ MALI_DEBUG_PRINT(2, ("Page directory for session 0x%x placed at physical address 0x%08X\n", mali_session_data, session_data->page_directory));
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ /* mark each page table as not present */
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, sizeof(u32) * i, 0);
+ }
+
+ /* page_table_mapped[] is already set to NULL by _mali_osk_calloc call */
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->active_mmus);
+ session_data->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 128);
+ if (NULL == session_data->lock)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Init the session's memory allocation list */
+ _MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+ *slot = session_data; /* slot will point to our data object */
+ MALI_DEBUG_PRINT(2, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
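+
+/*
+ * A session set up above owns four resources: the descriptor mapping table,
+ * one page directory page, the session lock and the list of memory
+ * allocations. mali_memory_core_session_end() below releases them again in
+ * roughly the reverse order.
+ */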
+
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation*)map_target;
+
+ MALI_DEBUG_PRINT(1, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+ MALI_DEBUG_ASSERT(descriptor);
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+ int i;
+ const int num_page_table_entries = sizeof(session_data->page_entries_mapped) / sizeof(session_data->page_entries_mapped[0]);
+
+ MALI_DEBUG_PRINT(2, ("MMU session end\n"));
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ session_data = (memory_session *)*slot;
+
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+ /* Lock the session so we can modify the memory list */
+ _mali_osk_lock_wait( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+ /* Non-interruptable lock, so the wait is guaranteed to succeed; any error checking is done inside the OSK function. */
+
+#ifndef MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+#if _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#error Indirect MMAP specified, but UKK does not have implicit MMAP cleanup. Current implementation does not handle this.
+#else
+
+ /* Free all memory engine allocations */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_allocation *descriptor;
+ mali_memory_allocation *temp;
+ _mali_uk_mem_munmap_s unmap_args;
+
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ unmap_args.ctx = mali_session_data;
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+ {
+ MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ descriptor->mali_address, descriptor->size, descriptor->mapping)
+ );
+ /* ASSERT that the descriptor's lock references the correct thing */
+ MALI_DEBUG_ASSERT( descriptor->lock == session_data->lock );
+ /* Therefore, we have already locked the descriptor */
+
+ unmap_args.size = descriptor->size;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.cookie = (u32)descriptor;
+
+ /*
+ * This removes the descriptor from the list, and frees the descriptor
+ *
+ * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+ * the only OS we are aware of that requires indirect MMAP also has
+ * implicit mmap cleanup.
+ */
+ _mali_ukk_mem_munmap_internal( &unmap_args );
+ }
+ }
+
+ /* Assert that we really did free everything */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+#endif /* _MALI_OSK_SPECIFIC_INDIRECT_MMAP */
+#endif /* MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP */
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ for (i = 0; i < num_page_table_entries; i++)
+ {
+ /* free PTE memory */
+ if (session_data->page_directory_mapped && (_mali_osk_mem_ioread32(session_data->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+ {
+ mali_mmu_release_table_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0);
+ }
+ }
+
+ if (MALI_INVALID_PAGE != session_data->page_directory)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ session_data->page_directory = MALI_INVALID_PAGE;
+ }
+
+ _mali_osk_lock_signal( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ /**
+ * @note Could the VMA close handler mean that we use the session data after it was freed?
+ * In which case, would need to refcount the session data, and free on VMA close
+ */
+
+ /* Free the lock */
+ _mali_osk_lock_term( session_data->lock );
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ /* clear our slot */
+ *slot = NULL;
+
+ return;
+}
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+
+ MALI_CHECK_NO_ERROR(mali_mmu_get_table_page(&mali_empty_page_directory, &mapping));
+
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+static void mali_free_empty_page_directory(void)
+{
+ if (MALI_INVALID_PAGE != mali_empty_page_directory)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ _mali_osk_mem_iowrite32( mapping, i * sizeof(u32), data);
+ }
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ fill_page(mali_page_fault_flush_data_page_mapping, 0);
+ fill_page(mali_page_fault_flush_page_table_mapping, mali_page_fault_flush_data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ fill_page(mali_page_fault_flush_page_directory_mapping, mali_page_fault_flush_page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(mali_page_fault_flush_page_table);
+ mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+ mali_page_fault_flush_page_table_mapping = NULL;
+ }
+ mali_mmu_release_table_page(mali_page_fault_flush_data_page);
+ mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+ mali_page_fault_flush_data_page_mapping = NULL;
+ }
+ MALI_ERROR(err);
+}
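+
+/*
+ * The three flush pages allocated above form a minimal, self-contained
+ * address space (sketch, derived from the fill_page() calls):
+ *
+ *   flush page directory --(PRESENT)-------------> flush page table
+ *   flush page table     --(READ|WRITE|PRESENT)--> flush data page
+ *
+ * Every directory and table entry points to the next level, so any virtual
+ * address resolves to the dummy data page. Activating this directory lets a
+ * faulting MMU drain its queued requests harmlessly; see
+ * mali_kernel_mmu_bus_reset().
+ */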
+
+static void mali_free_fault_flush_pages(void)
+{
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_page_directory)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_page_directory);
+ mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_page_table)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_page_table);
+ mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_data_page)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_data_page);
+ mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* Report the allocators */
+ mali_allocation_engine_report_allocators( physical_memory_allocators );
+
+ /* allocate the helper pages */
+ MALI_CHECK_NO_ERROR( mali_allocate_empty_page_directory() );
+ if (_MALI_OSK_ERR_OK != mali_allocate_fault_flush_pages())
+ {
+ mali_free_empty_page_directory();
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* activate the empty page directory on all MMUs */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMUs activated\n"));
+ /* the MMU system is now active */
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+ _mali_mem_info * mem_info;
+
+ /* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+ MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+ info->has_mmu = 1;
+
+ mem_info = _mali_osk_calloc(1,sizeof(_mali_mem_info));
+ MALI_CHECK_NON_NULL( mem_info, _MALI_OSK_ERR_NOMEM );
+
+ mem_info->size = 2048UL * 1024UL * 1024UL;
+ mem_info->maximum_order_supported = 30;
+ mem_info->flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE;
+ mem_info->identifier = 0;
+
+ info->mem_info = mem_info;
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
+ ));
+
+ if (NULL != mali_memory_core_mmu_lookup(resource->mmu_id))
+ {
+ MALI_DEBUG_PRINT(1, ("Duplicate MMU ids found. The id %d is already in use\n", resource->mmu_id));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, MALI_MMU_REGISTERS_SIZE, resource->description))
+ {
+ /* specified addresses are already in use by another driver / the kernel */
+ MALI_DEBUG_PRINT(
+ 1, ("Failed to request MMU '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
+ ));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mmu = _mali_osk_calloc(1, sizeof(mali_kernel_memory_mmu));
+
+ if (NULL == mmu)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory for handling a MMU unit"));
+ _mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->list);
+
+ mmu->id = resource->mmu_id;
+ mmu->irq_nr = resource->irq;
+ mmu->flags = resource->flags;
+ mmu->base = resource->base;
+ mmu->mapping_size = MALI_MMU_REGISTERS_SIZE;
+ mmu->description = resource->description; /* no need to copy */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->session_link);
+ mmu->in_page_fault_handler = 0;
+
+ mmu->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 127-mmu->id);
+ if (NULL == mmu->lock)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create mmu lock\n"));
+ _mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* map the registers */
+ mmu->mapped_registers = _mali_osk_mem_mapioregion( mmu->base, mmu->mapping_size, mmu->description );
+ if (NULL == mmu->mapped_registers)
+ {
+ /* failed to map the registers */
+ MALI_DEBUG_PRINT(1, ("Failed to map MMU registers at 0x%08X\n", mmu->base));
+ _mali_osk_lock_term(mmu->lock);
+ _mali_osk_mem_unreqregion(mmu->base, MALI_MMU_REGISTERS_SIZE);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X) mapped to 0x%08X\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1, mmu->mapped_registers
+ ));
+
+ /* setup MMU interrupt mask */
+ /* set all values to known defaults */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* setup MMU page directory pointer */
+ /* The mali_page_directory pointer is guaranteed to be 4kB aligned because we've used get_zeroed_page to acquire it */
+ /* convert the kernel virtual address into a physical address and set */
+
+ /* add to our list of MMUs */
+ _mali_osk_list_addtail(&mmu->list, &mmu_head);
+
+ mmu->irq = _mali_osk_irq_init(
+ mmu->irq_nr,
+ mali_kernel_memory_mmu_interrupt_handler_upper_half,
+ mali_kernel_memory_mmu_interrupt_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)mali_mmu_probe_irq_trigger,
+ (_mali_osk_irq_ack_t)mali_mmu_probe_irq_acknowledge,
+ mmu,
+ "mali_mmu_irq_handlers"
+ );
+ if (NULL == mmu->irq)
+ {
+ _mali_osk_list_del(&mmu->list);
+ _mali_osk_lock_term(mmu->lock);
+ _mali_osk_mem_unmapioregion( mmu->base, mmu->mapping_size, mmu->mapped_registers );
+ _mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* set to a known state */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+
+ MALI_DEBUG_PRINT(2, ("MMU registered\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ MALI_DEBUG_CODE(u32 data = )
+ _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ MALI_DEBUG_CODE(data = )
+ _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) *2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framework '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+
+ u32 alloc_order = resource->alloc_order;
+
+ allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ allocator->alloc_order = alloc_order;
+
+ /* link in the allocator: insertion into a list ordered by alloc_order;
+ * allocators with the same alloc_order are used last-in first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
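+
+/*
+ * Worked example of the ordered insertion above (the same scheme is used in
+ * mali_memory_core_resource_dedicated_memory() below): registering
+ * allocators with alloc_order 5, 3 and 5, in that sequence, results in the
+ * list 3 -> 5 (newest) -> 5 (oldest), i.e. allocators of equal order are
+ * used last-in first.
+ */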
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+ dedicated_memory_info * cleanup_data;
+
+ u32 alloc_order = resource->alloc_order;
+
+ /* do the lowlevel linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* create generic block allocator object to handle it */
+ allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );
+
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* save lowlevel cleanup info */
+ allocator->alloc_order = alloc_order;
+
+ cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+ if (NULL == cleanup_data)
+ {
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ allocator->destroy(allocator);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ cleanup_data->base = resource->base;
+ cleanup_data->size = resource->size;
+
+ cleanup_data->next = mem_region_registrations;
+ mem_region_registrations = cleanup_data;
+
+ /* link in the allocator: insertion into a list ordered by alloc_order;
+ * allocators with the same alloc_order are used last-in first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 int_stat;
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ mmu = (mali_kernel_memory_mmu *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ /* check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+ if (0 == int_stat)
+ {
+ MALI_DEBUG_PRINT(5, ("Ignoring shared interrupt\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT); /* no bits set, we are sharing the IRQ line and someone else caused the interrupt */
+ }
+
+ MALI_DEBUG_PRINT(1, ("mali_kernel_memory_mmu_interrupt_handler_upper_half\n"));
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+
+ mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_PRINT(("Page fault on %s\n", mmu->description));
+
+ _mali_osk_irq_schedulework(mmu->irq);
+ }
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_PRINT(("Bus read error on %s\n", mmu->description));
+ /* clear interrupt flag */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* reenable it */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+
+ MALI_SUCCESS;
+}
+
+
+static void mali_kernel_mmu_bus_reset(mali_kernel_memory_mmu * mmu)
+{
+
+#if defined(USING_MALI200)
+ int i;
+ const int replay_buffer_check_interval = 10; /* must be below 1000 */
+ const int replay_buffer_max_number_of_checks = 100;
+#endif
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* add an extra reference while handling the page fault */
+ mmu->usage_count++;
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(4, ("Sending stop bus request to cores\n"));
+ /* request to stop the bus, but don't wait for it to actually stop */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, (u32)mmu);
+
+#if defined(USING_MALI200)
+ /* no new requests will come from any of the connected cores from now on;
+ * we must now flush the replay buffer for any requests already queued
+ */
+ MALI_DEBUG_PRINT(4, ("Switching to the special page fault flush page directory\n"));
+ /* don't use the mali_mmu_activate_address_space function here as we can't stall the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_page_fault_flush_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ /* resume the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+ /* the MMU will now play back all the requests, all going to our special page fault flush data page */
+
+ /* just to be safe, check that the replay buffer is empty before continuing */
+ if (!mali_benchmark) {
+ for (i = 0; i < replay_buffer_max_number_of_checks; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY) break;
+ _mali_osk_time_ubusydelay(replay_buffer_check_interval);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, i == replay_buffer_max_number_of_checks, ("MMU: %s: Failed to flush replay buffer on page fault\n", mmu->description));
+ MALI_DEBUG_PRINT(1, ("Replay playback took %ld usec\n", i * replay_buffer_check_interval));
+ }
+#endif
+ /* notify all subsystems that the core should be reset once the bus is actually stopped */
+ MALI_DEBUG_PRINT(4,("Sending job abort command to subsystems\n"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, (u32)mmu);
+
+ /* reprogram the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ /* release the extra address space reference, will schedule */
+ mali_memory_core_mmu_release_address_space_reference(mmu);
+
+ /* resume normal operation */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, (u32)mmu);
+ MALI_DEBUG_PRINT(4, ("Page fault handling complete\n"));
+}
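+
+/*
+ * Summary of the recovery sequence implemented above:
+ *   1. MMU_KILL_STEP1: ask all connected cores to stop their bus traffic.
+ *   2. (MALI200 only) switch to the page fault flush page directory and
+ *      complete the fault so the replay buffer drains into the dummy page.
+ *   3. MMU_KILL_STEP2: reset the cores and abort their jobs.
+ *   4. Soft-reset the MMU and re-enable paging on the empty page directory.
+ *   5. MMU_KILL_STEP3: let the subsystems resume normal job handling.
+ * The STEP0 lock / STEP4 unlock broadcasts are issued around this call by
+ * the page fault bottom half.
+ */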
+
+void mali_kernel_mmu_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+
+ MALI_DEBUG_PRINT(4, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->description));
+
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+void mali_kernel_mmu_force_bus_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ MALI_DEBUG_PRINT(1, ("Mali MMU: Force_bus_reset.\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+ mali_kernel_mmu_bus_reset(mmu);
+}
+
+
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 raw, fault_address, status;
+
+ if (NULL == data)
+ {
+ MALI_PRINT_ERROR(("MMU IRQ work queue: NULL argument"));
+ return; /* Error */
+ }
+ mmu = (mali_kernel_memory_mmu*)data;
+
+
+ MALI_DEBUG_PRINT(4, ("Locking subsystems\n"));
+ /* lock all subsystems */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP0_LOCK_SUBSYSTEM, (u32)mmu);
+
+ raw = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_RAWSTAT);
+ status = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
+ {
+ MALI_DEBUG_PRINT(1, ("MMU: Page fault bottom half: No Irq found.\n"));
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+ return;
+ }
+
+ mmu->in_page_fault_handler = 1;
+
+ fault_address = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+ MALI_PRINT(("Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ (void*)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->description)
+ );
+
+ if (NULL == mmu->active_session)
+ {
+ MALI_PRINT(("Spurious memory access detected from MMU %s\n", mmu->description));
+ }
+ else
+ {
+ MALI_PRINT(("Active page directory at 0x%08X\n", mmu->active_session->page_directory));
+ MALI_PRINT(("Info from page table for VA 0x%x:\n", (void*)fault_address));
+ MALI_PRINT(("DTE entry: PTE at 0x%x marked as %s\n",
+ (void*)(_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK),
+ _mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT ? "present" : "not present"
+ ));
+
+ if (_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped, MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)
+ {
+ mali_io_address pte;
+ u32 data;
+ pte = mmu->active_session->page_entries_mapped[MALI_MMU_PDE_ENTRY(fault_address)];
+ data = _mali_osk_mem_ioread32(pte, MALI_MMU_PTE_ENTRY(fault_address) * sizeof(u32));
+ MALI_PRINT(("PTE entry: Page at 0x%x, %s %s %s\n",
+ (void*)(data & ~MALI_MMU_FLAGS_MASK),
+ data & MALI_MMU_FLAGS_PRESENT ? "present" : "not present",
+ data & MALI_MMU_FLAGS_READ_PERMISSION ? "readable" : "",
+ data & MALI_MMU_FLAGS_WRITE_PERMISSION ? "writable" : ""
+ ));
+ }
+ else
+ {
+ MALI_PRINT(("PTE entry: Not present\n"));
+ }
+ }
+
+
+ mali_kernel_mmu_bus_reset(mmu);
+
+ mmu->in_page_fault_handler = 0;
+
+ /* unlock all subsystems */
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+
+}
+
+
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg)
+{
+ u32 val;
+
+ if (mali_benchmark) return 0;
+
+ val = _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_read addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32),val));
+
+ return val;
+}
+
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val)
+{
+ if (mali_benchmark) return;
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_write addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32), val));
+
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_dd_handle ump_mem;
+ ump_mem_allocation *allocation;
+
+ allocation = (ump_mem_allocation *)handle;
+
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ ump_mem = allocation->ump_mem;
+
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+ _mali_osk_free( allocation );
+
+	ump_dd_reference_release(ump_mem);
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */
+
+
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(3, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
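+
+/*
+ * Note on the guard page handling in external_memory_commit() above and in
+ * ump_memory_commit(): when MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE is
+ * set, one extra page is mapped at the end of the Mali virtual range,
+ * backed by the first physical page of the allocation.
+ */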
+
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * allocation;
+
+ allocation = (external_mem_allocation *) handle;
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( allocation );
+
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( args->phys_addr, args->size ) );
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = NULL;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+}
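+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): the cookie
+ * returned by _mali_ukk_map_external_mem() identifies the descriptor and is
+ * what a later unmap call hands back.
+ *
+ *   _mali_uk_map_external_mem_s map_args = { 0, };
+ *   map_args.ctx          = mali_session;  // session context (assumed)
+ *   map_args.phys_addr    = phys;          // validated Mali physical base
+ *   map_args.size         = size;          // multiple of the page size
+ *   map_args.mali_address = mali_va;       // Mali virtual address
+ *   if (_MALI_OSK_ERR_OK == _mali_ukk_map_external_mem(&map_args))
+ *   {
+ *       _mali_uk_unmap_external_mem_s unmap_args = { 0, };
+ *       unmap_args.ctx    = mali_session;
+ *       unmap_args.cookie = map_args.cookie;
+ *       _mali_ukk_unmap_external_mem(&unmap_args);
+ *   }
+ */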
+
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, giving this layout: (0-1GB unused)(1GB-3GB usable by Mali)(3GB-4GB unused) */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 110);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+}
+
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == _mali_osk_list_empty(&page_table_cache.partial))
+ {
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to full list*/
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ alloc->usage_map = _mali_osk_calloc(1, (((alloc->num_pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1)) / BITS_PER_LONG) * sizeof(unsigned long));
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* clear memory allocation */
+ fill_page(alloc->pages.mapping, 0);
+
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
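+
+/*
+ * Sizing illustration for the usage_map above (block size assumed for the
+ * example): if the page table allocator hands back a 64 kB block,
+ * num_pages is 16 and the bitmap rounds up to a single unsigned long on
+ * both 32-bit and 64-bit builds.
+ */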
+
+void mali_mmu_release_table_page(u32 pa)
+{
+ mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+ MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));
+
+ MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* find the entry this address belongs to */
+ /* first check the partial list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ /* then check the full list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ /* transfer to partial list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void* mali_memory_core_mmu_lookup(u32 id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* find an MMU with a matching id */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ if (id == mmu->id) return mmu;
+ }
+
+ /* not found */
+ return NULL;
+}
+
+void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory)
+{
+ const int delay_in_usecs = 10;
+ const int max_loop_count = 10;
+ int i;
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark) {
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) break;
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Stall request failed, swapping anyway\n"));
+ }
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+}
+
+_mali_osk_errcode_t mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument)
+{
+ memory_session * requested_memory_session;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ MALI_DEBUG_ASSERT_POINTER(mali_session_data);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ MALI_DEBUG_PRINT(4, ("Asked to activate page table for session 0x%x on MMU %s\n", mali_session_data, mmu->description));
+ requested_memory_session = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ MALI_DEBUG_PRINT(5, ("Session 0x%x looked up as using memory session 0x%x\n", mali_session_data, requested_memory_session));
+
+ MALI_DEBUG_ASSERT_POINTER(requested_memory_session);
+
+ MALI_DEBUG_PRINT(7, ("Taking locks\n"));
+
+ _mali_osk_lock_wait(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == mmu->usage_count)
+ {
+ /* no session currently active, activate the requested session */
+ MALI_DEBUG_ASSERT(NULL == mmu->active_session);
+ mmu->active_session = requested_memory_session;
+ mmu->usage_count = 1;
+ MALI_DEBUG_PRINT(4, ("MMU idle, activating page directory 0x%08X on MMU %s\n", requested_memory_session->page_directory, mmu->description));
+ mali_mmu_activate_address_space(mmu, requested_memory_session->page_directory);
+ {
+ /* Insert mmu into the right place in the active_mmus list so that
+ * it is still sorted. The list must be sorted by ID so we can get
+ * the mutexes in the right order in
+ * _mali_ukk_mem_munmap_internal().
+ */
+ _mali_osk_list_t *entry;
+ for (entry = requested_memory_session->active_mmus.next;
+ entry != &requested_memory_session->active_mmus;
+ entry = entry->next)
+ {
+ mali_kernel_memory_mmu *temp = _MALI_OSK_LIST_ENTRY(entry, mali_kernel_memory_mmu, session_link);
+ if (mmu->id < temp->id)
+ break;
+ }
+ /* If we broke out, then 'entry' points to the list node of the
+ * first mmu with a greater ID; otherwise, it points to
+ * active_mmus. We want to add *before* this node.
+ */
+ _mali_osk_list_addtail(&mmu->session_link, entry);
+ }
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ /* Allow two cores to run in parallel if they come from the same session */
+ else if (
+ (mmu->in_page_fault_handler == 0) &&
+ (requested_memory_session == mmu->active_session ) &&
+ (0==(MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES & mmu->flags))
+ )
+ {
+ /* nested activation detected, just update the reference count */
+ MALI_DEBUG_PRINT(4, ("Nested activation detected, %d previous activations found\n", mmu->usage_count));
+ mmu->usage_count++;
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ else if (NULL != callback)
+ {
+ /* can't activate right now, notify caller on idle via callback */
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ int found = 0;
+
+ MALI_DEBUG_PRINT(3, ("The MMU is busy and is using a different address space, callback given\n"));
+ /* check for existing registration */
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ if (callback_object->callback == callback)
+ {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ {
+ MALI_DEBUG_PRINT(5, ("Duplicate callback registration found, ignoring\n"));
+ /* callback already registered */
+ err = _MALI_OSK_ERR_BUSY;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(5,("New callback, registering\n"));
+ /* register the new callback */
+ callback_object = _mali_osk_malloc(sizeof(mali_kernel_memory_mmu_idle_callback));
+ if (NULL != callback_object)
+ {
+ MALI_DEBUG_PRINT(7,("Callback struct setup\n"));
+ callback_object->callback = callback;
+ callback_object->callback_argument = callback_argument;
+ _mali_osk_list_addtail(&callback_object->link, &mmu->callbacks);
+ err = _MALI_OSK_ERR_BUSY;
+ }
+ }
+ }
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_signal(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_ERROR(err);
+}
+
+void mali_memory_core_mmu_release_address_space_reference(void* mmu_ptr)
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp;
+ mali_kernel_memory_mmu * mmu;
+ memory_session * session;
+
+ _MALI_OSK_LIST_HEAD(callbacks);
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ session = mmu->active_session;
+
+ /* handle spurious page faults, where no session may be active */
+ if (NULL != session)
+ {
+ _mali_osk_lock_wait(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("Deactivation of address space on MMU %s, %d references exists\n", mmu->description, mmu->usage_count));
+ MALI_DEBUG_ASSERT(0 != mmu->usage_count);
+ mmu->usage_count--;
+ if (0 != mmu->usage_count)
+ {
+ MALI_DEBUG_PRINT(4, ("MMU still in use by this address space, %d references still exists\n", mmu->usage_count));
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* handle spurious page faults, where no session may be active */
+ if (NULL != session)
+ {
+ _mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ return;
+ }
+
+ MALI_DEBUG_PRINT(4, ("Activating the empty page directory on %s\n", mmu->description));
+
+ /* last reference gone, deactivate current address space */
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+
+ /* unlink from session */
+ _mali_osk_list_delinit(&mmu->session_link);
+ /* remove the active session pointer */
+ mmu->active_session = NULL;
+
+ /* Notify all registered callbacks.
+ * We have to be clever here:
+ * We must call the callbacks with the spinlock unlocked and
+ * the callback list emptied to allow them to re-register.
+ * So we make a copy of the list, clear the original, and then call the callbacks on the local copy.
+ */
+ /* copy list */
+ _MALI_OSK_INIT_LIST_HEAD(&callbacks);
+ _mali_osk_list_splice(&mmu->callbacks, &callbacks);
+ /* clear the original, allowing new registrations during the callback */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
+
+ /* end of mmu manipulation, so safe to unlock */
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* then finally release the (possible) session lock; no session may have been active (spurious page fault handling) */
+ if (NULL != session)
+ {
+ _mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp, &callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ (callback_object->callback)(callback_object->callback_argument);
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ }
+}
+
+void mali_memory_core_mmu_unregister_callback(void* mmu_ptr, void(*callback)(void*))
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+
+ MALI_DEBUG_ASSERT_POINTER(callback);
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ if (callback_object->callback == callback)
+ {
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ break;
+ }
+ }
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+ /* allocate page tables, if needed */
+ int i;
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address);
+ int last_pde_idx;
+ memory_session * session_data;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+ int page_dir_updated = 0;
+#endif
+
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + _MALI_OSK_MALI_PAGE_SIZE + descriptor->size - 1);
+ }
+ else
+ {
+ last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + descriptor->size - 1);
+ }
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ MALI_DEBUG_PRINT(4, ("allocating page tables for Mali virtual address space 0x%08X to 0x%08X\n", descriptor->mali_address, descriptor->mali_address + descriptor->size - 1));
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of allocating more memory for a suspended GP job (PLBU heap).
+ * From Mali-400 MP r1p0, MMU page directories/tables are also cached by the Mali L2 cache, so we need to invalidate the page directory
+ * in the L2 cache if we add new page directory entries (PDEs) to it.
+ * We only need to do this when an MMU is active, because otherwise the entire Mali L2 cache is invalidated at job start.
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ if ( 0 == (_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT) )
+ {
+ u32 pte_phys;
+ mali_io_address pte_mapped;
+ _mali_osk_errcode_t err;
+
+ /* allocate a new page table */
+ MALI_DEBUG_ASSERT(0 == session_data->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == session_data->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pte_phys, &pte_mapped);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ session_data->page_entries_mapped[i] = pte_mapped;
+ MALI_DEBUG_ASSERT_POINTER( session_data->page_entries_mapped[i] );
+
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), pte_phys | MALI_MMU_FLAGS_PRESENT); /* mark page table as present */
+
+ /* update usage count */
+ session_data->page_entries_usage_count[i]++;
+#if defined USING_MALI400_L2_CACHE
+ page_dir_updated = 1;
+#endif
+ continue; /* continue loop */
+ }
+
+ MALI_DEBUG_PRINT(1, ("Page table alloc failed\n"));
+ break; /* abort loop, failed to allocate one or more page tables */
+ }
+ else
+ {
+ session_data->page_entries_usage_count[i]++;
+ }
+ }
+
+ if (i <= last_pde_idx)
+ {
+ /* one or more page tables could not be allocated; drop the references we added */
+ /* step back past the entry which caused the loop to abort */
+ i--;
+
+ while (i >= first_pde_idx)
+ {
+ MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
+ session_data->page_entries_usage_count[i]--;
+ if (0 == session_data->page_entries_usage_count[i])
+ {
+ /* last reference removed */
+ mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
+ session_data->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
+ }
+ i--;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus && 1 == page_dir_updated)
+ {
+ /*
+ * We have updated the page directory and have an active MMU using it, so invalidate it in the Mali L2 cache.
+ */
+ mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
+ }
+#endif
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
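+/*
+ * Index arithmetic sketch (illustrative): with 4 kB pages and 4 MB of Mali
+ * virtual address space covered per page-directory entry (0x400000, as used
+ * in the release path below), a 32-bit Mali address would decompose roughly
+ * as shown. The real MALI_MMU_PDE_ENTRY/MALI_MMU_PTE_ENTRY macros are
+ * defined elsewhere; this is a consistency sketch, not their definition.
+ */
+#if 0 /* illustrative sketch, not compiled */
+static u32 example_pde_index(u32 mali_va) { return mali_va >> 22; }           /* 1024 directory entries */
+static u32 example_pte_index(u32 mali_va) { return (mali_va >> 12) & 0x3FF; } /* 1024 entries per table */
+#endif
+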
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+ int first_pde_idx;
+ int last_pde_idx;
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 left;
+ int i;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+ int page_dir_updated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_directory_mapped);
+
+ mali_address = descriptor->mali_address;
+ mali_address_end = descriptor->mali_address + descriptor->size;
+ left = descriptor->size;
+
+ first_pde_idx = MALI_MMU_PDE_ENTRY(mali_address);
+ last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ MALI_DEBUG_PRINT(3, ("Zapping Mali MMU table for address 0x%08X size 0x%08X\n", mali_address, left));
+ MALI_DEBUG_PRINT(4, ("Zapping PDE %d through %d\n", first_pde_idx, last_pde_idx));
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * From Mali-400 MP r1p0, MMU page directories/tables are also cached by the Mali L2 cache, so we need to invalidate the page tables
+ * in the L2 cache to ensure that the memory is really unmapped.
+ * We only need to do this when an MMU is active, because otherwise the entire Mali L2 cache is invalidated at job start.
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ const int size_inside_pte = left < 0x400000 ? left : 0x400000;
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
+ MALI_DEBUG_PRINT(4, ("PDE %d\n", i));
+
+ session_data->page_entries_usage_count[i]--;
+
+ if (0 == session_data->page_entries_usage_count[i])
+ {
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+ mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
+ session_data->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
+#if defined USING_MALI400_L2_CACHE
+ page_dir_updated = 1;
+#endif
+ }
+ else
+ {
+ int j;
+ const int first_pte_idx = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte_idx = MALI_MMU_PTE_ENTRY(mali_address + size_inside_pte - 1);
+
+ MALI_DEBUG_PRINT(4, ("Partial page table fill detected, zapping entries %d through %d (page table at 0x%08X)\n", first_pte_idx, last_pte_idx, MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)))));
+
+ for (j = first_pte_idx; j <= last_pte_idx; j++)
+ {
+ _mali_osk_mem_iowrite32(session_data->page_entries_mapped[i], j * sizeof(u32), 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("zap complete\n"));
+
+ mali_address += size_inside_pte;
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ /* Invalidate the page we've just modified */
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+#endif
+ }
+ left -= size_inside_pte;
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if ((1 == page_dir_updated) && (1== has_active_mmus))
+ {
+ /* The page directory was also updated */
+ mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
+ }
+#endif
+}
+
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 current_phys_addr;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ current_phys_addr = *phys_addr;
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+ mali_address_end = descriptor->mali_address + offset + size;
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of allocating more memory for a suspended GP job (PLBU heap).
+ * From Mali-400 MP r1p0, MMU page directories/tables are also cached by the Mali L2 cache, so we need to invalidate the page tables
+ * in the L2 cache when we have allocated more heap memory.
+ * We only need to do this when an MMU is active, because otherwise the entire Mali L2 cache is invalidated at job start.
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", current_phys_addr, mali_address, size));
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped);
+
+ for ( ; mali_address < mali_address_end; mali_address += MALI_MMU_PAGE_SIZE, current_phys_addr += MALI_MMU_PAGE_SIZE)
+ {
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)], MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32), current_phys_addr | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ int i;
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ /*
+ * Invalidate the updated page table(s), in case they have been used for something
+ * else since the last job start (when the entire Mali L2 cache was invalidated)
+ */
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+ }
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* This handler is registered with mali_mmap for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ struct mali_session_data * mali_session_data;
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+ descriptor->lock = session_data->lock;
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+ {
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ /* no need to lock the MMU as we own it already */
+ MALI_DEBUG_PRINT(5, ("Zapping the cache of mmu %s as it's using the page table we have updated\n", mmu->description));
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+ if (!mali_benchmark) {
+ while ( (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) == 0) _mali_osk_time_ubusydelay(1);
+ }
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* All ok, write out any information generated from this call */
+ args->mapping = descriptor->mapping;
+ args->cookie = (u32)descriptor;
+
+ MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+ /* All done */
+ MALI_SUCCESS;
+ }
+ else
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ /* OOM, but not a fatal error */
+ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+ _mali_osk_free(descriptor);
+ /* Linux will free the CPU address allocation; the userspace client frees the Mali address allocation */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+}
+
+static void _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+ memory_session * session_data;
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ /* Stall the MMU(s) which is using the address space we're operating on.
+ * Note that active_mmus must be sorted in order of ID to avoid a mutex
+ * ordering violation.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ const int max_loop_count = 100;
+ const int sleep_duration = 1; /* must be below 1000 */
+ int i;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark)
+ {
+ for ( i = 0; i < max_loop_count; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) break;
+ _mali_osk_time_ubusydelay(sleep_duration);
+ }
+
+ MALI_DEBUG_PRINT_IF(3, max_loop_count == i, ("Stall failed, trying zap anyway\n"));
+ }
+ }
+
+ /* This function also removes the memory from the session's memory list */
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ /* any L2 maintenance was done during mali_allocation_engine_release_memory */
+ /* the session is locked, so the active mmu list should be the same */
+ /* zap the TLB and resume operation */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+/* Handler for unmapping memory for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((memory_session*)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+ /* Noninterruptible lock type, so the lock must always have been taken; checking should already have been done in the OSK function. */
+
+ _mali_ukk_mem_munmap_internal( args );
+ /* descriptor is no longer valid; it may have been freed */
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Called when the rendercore wants the MMU to raise an interrupt */
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu)
+{
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_trigger\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Called when the IRQ probe wants the MMU to acknowledge an interrupt from the hardware */
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu)
+{
+ u32 int_stat;
+
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+
+ if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+struct dump_info
+{
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char * comment, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial) MALI_DEBUG_PRINT(1, ("writereg %08X %08X # %s\n", where, what, comment));
+
+ if (NULL != info)
+ {
+ info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+ if (NULL != info->buffer)
+ {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32)*2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial)
+ {
+ int i;
+ for (i = 0; i < 256; i++)
+ {
+ MALI_DEBUG_PRINT(1, ("%08X: %08X %08X %08X %08X\n", phys_addr + 16*i, _mali_osk_mem_ioread32(page, (i*4 + 0) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 1) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 2) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 3) * sizeof(u32))));
+
+ }
+ }
+
+ if (NULL != info)
+ {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer)
+ {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(memory_session * session_data, struct dump_info * info)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != session_data->page_directory_mapped)
+ {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_directory_mapped, session_data->page_directory, info, 0)
+ );
+
+ for (i = 0; i < 1024; i++)
+ {
+ if (NULL != session_data->page_entries_mapped[i])
+ {
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_entries_mapped[i], _mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info, 0)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(memory_session * session_data, struct dump_info * info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, session_data->page_directory, "set the page directory address", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info, 0));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ info.buffer_left = args->size;
+ info.buffer = args->buffer;
+
+ args->register_writes = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+
+ args->page_table_dump = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
+
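+/*
+ * Layout sketch of the dump produced above (illustrative): the register-write
+ * section is a sequence of (address, value) u32 pairs, and the page table
+ * section is a sequence of records made of a u32 physical address followed by
+ * one MALI_MMU_PAGE_SIZE page image. A consumer could walk the register
+ * writes roughly as below; example_walk_register_writes is a hypothetical
+ * helper, not part of the driver.
+ */
+#if 0 /* illustrative sketch, not compiled */
+static void example_walk_register_writes(const u32 *buf, u32 size_in_bytes)
+{
+ u32 i;
+ for (i = 0; i < size_in_bytes / sizeof(u32); i += 2)
+ {
+ MALI_DEBUG_PRINT(1, ("reg 0x%08X <= 0x%08X\n", buf[i], buf[i + 1]));
+ }
+}
+#endif
+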
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h
new file mode 100644
index 00000000000..a199fd66b5a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_MMU_H__
+#define __MALI_KERNEL_MEM_MMU_H__
+
+#include "mali_kernel_session_manager.h"
+
+/**
+ * Look up an MMU core by ID.
+ * @param id ID of the MMU to find
+ * @return NULL if the ID was not found or not valid, non-NULL if a core was found.
+ */
+void* mali_memory_core_mmu_lookup(u32 id);
+
+/**
+ * Activate a user session and its address space on the given MMU.
+ * If the session can't be activated because the MMU is busy and
+ * a callback pointer is given, the callback will be called once the MMU becomes idle.
+ * If the same callback pointer is registered multiple times it will only be called once.
+ * Nested activations are supported.
+ * Each call must be matched by a call to @see mali_memory_core_mmu_release_address_space_reference
+ *
+ * @param mmu_ptr The MMU to activate the address space on
+ * @param mali_session_data The user session whose address space to activate
+ * @param callback Pointer to the function to call when the MMU becomes idle
+ * @param callback_argument Argument given to the callback
+ * @return 0 if the address space was activated, -EBUSY if the MMU was busy, -EFAULT in all other cases.
+ */
+int mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument);
+
+/**
+ * Release a reference to the current active address space.
+ * Once the last reference is released any callback(s) registered will be called before the function returns
+ *
+ * @note Caution must be exercised when calling this function with locks held, since the registered callbacks may be called from within it
+ * @param mmu The mmu to release a reference to the active address space of
+ */
+void mali_memory_core_mmu_release_address_space_reference(void* mmu);
+
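+/*
+ * Usage sketch (illustrative, not part of the interface): a core that wants
+ * to run work for a session would typically pair the two calls above as
+ * below. mmu_id, session, on_mmu_idle and my_core_data are hypothetical
+ * names used only for this example.
+ */
+#if 0 /* illustrative sketch, not compiled */
+ void *mmu = mali_memory_core_mmu_lookup(mmu_id);
+ if (0 == mali_memory_core_mmu_activate_page_table(mmu, session, on_mmu_idle, my_core_data))
+ {
+ /* ... issue work against the session's address space ... */
+ mali_memory_core_mmu_release_address_space_reference(mmu);
+ }
+ /* otherwise wait for on_mmu_idle() to be called, then retry */
+#endif
+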
+/**
+ * Soft reset of MMU - needed after power up
+ *
+ * @param mmu_ptr The MMU pointer registered with the relevant core
+ */
+void mali_kernel_mmu_reset(void * mmu_ptr);
+
+void mali_kernel_mmu_force_bus_reset(void * mmu_ptr);
+
+/**
+ * Unregister a previously registered callback.
+ * @param mmu The MMU to unregister the callback on
+ * @param callback The function to unregister
+ */
+void mali_memory_core_mmu_unregister_callback(void* mmu, void(*callback)(void*));
+
+
+
+#endif /* __MALI_KERNEL_MEM_MMU_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c
new file mode 100644
index 00000000000..845de935ec9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+typedef struct os_allocation
+{
+ u32 num_pages;
+ u32 offset_start;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+} os_allocation;
+
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex;
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ os_allocator * info;
+
+ max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+ MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(os_allocator));
+ if (NULL != info)
+ {
+ info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+ info->num_pages_allocated = 0;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, 106);
+ if (NULL != info->mutex)
+ {
+ allocator->allocate = os_allocator_allocate;
+ allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+ allocator->destroy = os_allocator_destroy;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (os_allocator*)allocator->ctx;
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ u32 left;
+ os_allocator * info;
+ os_allocation * allocation;
+ int pages_allocated = 0;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size - *offset;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+ allocation = _mali_osk_malloc(sizeof(os_allocation));
+ if (NULL != allocation)
+ {
+ u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+ allocation->offset_start = *offset;
+ allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+ MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+ {
+ err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ if ( _MALI_OSK_ERR_NOMEM == err)
+ {
+ /* 'Partial' allocation (or, out-of-memory on first page) */
+ break;
+ }
+
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+ /* Fatal error, cleanup any previous pages allocated. */
+ if ( pages_allocated > 0 )
+ {
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+ }
+
+ pages_allocated = 0;
+
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ break;
+ }
+
+ /* Loop iteration */
+ if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+ else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+ pages_allocated++;
+
+ *offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+
+ /* Loop termination; decide on result */
+ if (pages_allocated)
+ {
+ MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Some OSes do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This flush is required for Mali to have a correct view of the memory.
+ */
+ _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+ allocation->num_pages = pages_allocated;
+ allocation->engine = engine; /* Necessary to make the engine's unmap call */
+ allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+ info->num_pages_allocated += pages_allocated;
+
+ MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ alloc_info->ctx = info;
+ alloc_info->handle = allocation;
+ alloc_info->release = os_allocator_release;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+ _mali_osk_free( allocation );
+ }
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+static void os_allocator_release(void * ctx, void * handle)
+{
+ os_allocator * info;
+ os_allocation * allocation;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (os_allocator*)ctx;
+ allocation = (os_allocation*)handle;
+ engine = allocation->engine;
+ descriptor = allocation->descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER( engine );
+ MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+ MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+ info->num_pages_allocated -= allocation->num_pages;
+
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free(allocation);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ const int allocation_order = 6; /* _MALI_OSK_CPU_PAGE_SIZE << 6 */
+ void *virt;
+ const u32 pages_to_allocate = 1 << allocation_order;
+ const u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ os_allocator * info;
+
+ u32 cpu_phys_base;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ info = (os_allocator*)ctx;
+
+ /* Ensure we don't allocate more than we're supposed to from the ctx */
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if ( (info->num_pages_allocated + pages_to_allocate > info->num_pages_max) && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+ {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+ if ( NULL == virt )
+ {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ block->release = os_allocator_page_table_block_release;
+ block->ctx = ctx;
+ block->handle = (void*)allocation_order;
+ block->size = size;
+ block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+ block->mapping = virt;
+
+ info->num_pages_allocated += pages_to_allocate;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
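+/*
+ * Size sketch (illustrative): with allocation_order 6 the block above is
+ * 1 << 6 = 64 CPU pages, i.e. _MALI_OSK_CPU_PAGE_SIZE << 6 bytes; with the
+ * common 4 kB page size that is a 256 kB contiguous region. The actual page
+ * size comes from the OSK layer.
+ */
+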
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+ os_allocator * info;
+ u32 allocation_order;
+ u32 pages_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (os_allocator*)page_table_block->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER( info );
+
+ allocation_order = (u32)page_table_block->handle;
+
+ pages_allocated = 1 << allocation_order;
+
+ MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+ info->num_pages_allocated -= pages_allocated;
+
+ /* Adjust phys_base from mali physical address to CPU physical address */
+ _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h
new file mode 100644
index 00000000000..ff49f8a816a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
+
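+/*
+ * Usage sketch (illustrative): creating and destroying an OS allocator.
+ * The 16 MB limit, zero adjustment and name are example values only.
+ */
+#if 0 /* illustrative sketch, not compiled */
+ mali_physical_memory_allocator *alloc =
+ mali_os_allocator_create(16 * 1024 * 1024, 0, "OS memory (example)");
+ if (NULL != alloc)
+ {
+ /* ... hand the allocator to the memory engine ... */
+ alloc->destroy(alloc);
+ }
+#endif
+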
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c
new file mode 100644
index 00000000000..3b4ace05f01
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+typedef struct memory_engine
+{
+ mali_kernel_mem_address_manager * mali_address;
+ mali_kernel_mem_address_manager * process_address;
+} memory_engine;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+ memory_engine * engine;
+
+ /* Mali Address Manager need not support unmap_physical */
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+ /* Process Address Manager must support unmap_physical for OS allocation
+ * error path handling */
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+ engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+ if (NULL == engine) return NULL;
+
+ engine->mali_address = mali_address_manager;
+ engine->process_address = process_address_manager;
+
+ return (mali_allocation_engine)engine;
+}
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ _mali_osk_free(engine);
+}
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+ /* ASSERT that the list member has been initialized, even if it won't be
+ * used for tracking. We need it to be initialized to see if we need to
+ * delete it from a list in the release function. */
+ MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+ if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+ {
+ _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ res = engine->process_address->allocate(descriptor);
+ }
+ if ( _MALI_OSK_ERR_OK == res )
+ {
+ /* address space setup OK, commit physical memory to the allocation */
+ mali_physical_memory_allocator * active_allocator = physical_allocators;
+ struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+ u32 offset = 0;
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ if ( NULL != tracking_list )
+ {
+ /* Insert into the memory session list */
+ /* ASSERT that it is not already part of a list */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+ _mali_osk_list_add( &descriptor->list, tracking_list );
+ }
+
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* reuse current active_allocation_tracker */
+ MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ if (NULL != active_allocator->next)
+ {
+ /* need a new allocation tracker */
+ active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+ if (NULL != active_allocation_tracker->next)
+ {
+ active_allocation_tracker = active_allocation_tracker->next;
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Non-fatal OOM, have to cleanup, stopped at offset %d for size %d\n", offset, descriptor->size));
+
+ /* allocation failure, start cleanup */
+ /* loop over any potential partial allocations */
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ /* handle blank trackers which will show up during failure */
+ if (NULL != active_allocation_tracker->release)
+ {
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ }
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ /* release the address spaces */
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+ }
+ engine->mali_address->release(descriptor);
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
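+/*
+ * Chaining sketch (illustrative): the loop above walks the physical
+ * allocators via their ->next links, falling back to the next allocator on
+ * MALI_MEM_ALLOC_NONE or MALI_MEM_ALLOC_PARTIAL. A caller could build such
+ * a chain roughly as below; dedicated_alloc, os_alloc, engine, descriptor
+ * and tracking_list are hypothetical names used only for this example.
+ */
+#if 0 /* illustrative sketch, not compiled */
+ dedicated_alloc->next = os_alloc;
+ os_alloc->next = NULL;
+ err = mali_allocation_engine_allocate_memory(engine, descriptor,
+                                              dedicated_alloc, tracking_list);
+#endif
+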
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+ mali_physical_memory_allocation * active_allocation_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* Determine whether we need to remove this from a tracking list */
+ if ( ! _mali_osk_list_empty( &descriptor->list ) )
+ {
+ _mali_osk_list_del( &descriptor->list );
+ /* Clear the list for debug mode, catch use-after-free */
+ MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+ }
+
+ engine->mali_address->release(descriptor);
+
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+ _mali_osk_errcode_t err;
+ memory_engine * engine = (memory_engine*)mem_engine;
+ _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+ /* Handle process address manager first, because we may need them to
+ * allocate the physical page */
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Handle OS-allocated specially, since an adjustment may be required */
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+ {
+ MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+ /* Set flags to use on error path */
+ unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+ err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+ /* Adjust for cpu physical address to mali physical address */
+ phys -= cpu_usage_adjust;
+ }
+ else
+ {
+ u32 cpu_phys;
+ /* Adjust mali physical address to cpu physical address */
+ cpu_phys = phys + cpu_usage_adjust;
+ err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+ }
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_ERROR( err );
+ }
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+ /* Mali address manager must use the physical address - no point in asking
+ * it to allocate another one for us */
+ MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+ err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+ engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+ }
+
+ MALI_ERROR( err );
+ }
+
+ MALI_SUCCESS;
+}
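+
+/*
+ * Illustrative note (not part of the driver): cpu_usage_adjust is the constant
+ * offset between the CPU view and the Mali view of the same physical memory,
+ * so the conversions used above reduce to:
+ *
+ *   mali_phys = cpu_phys - cpu_usage_adjust;
+ *   cpu_phys  = mali_phys + cpu_usage_adjust;
+ *
+ * For example, with an assumed cpu_usage_adjust of 0x80000000, CPU physical
+ * address 0x9F000000 corresponds to Mali physical address 0x1F000000. The
+ * actual offset is platform configuration.
+ */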
+
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Mandatory for the process_address manager to have an unmap function */
+ engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+
+ /* Optional for the mali_address manager to have an unmap function */
+ if ( NULL != engine->mali_address->unmap_physical )
+ {
+ engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* try next */
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+ while ( NULL != active_allocator )
+ {
+ if ( NULL != active_allocator->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+ }
+ active_allocator = active_allocator->next;
+ }
+
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h
new file mode 100644
index 00000000000..80a2b4bb3a7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+typedef struct mali_physical_memory_allocation
+{
+ void (*release)(void * ctx, void * handle); /**< Function to call on to release the physical memory */
+ void * ctx;
+ void * handle;
+ struct mali_physical_memory_allocation * next;
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+ void (*release)(struct mali_page_table_block *page_table_block);
+ void * ctx;
+ void * handle;
+ u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragmentation */
+ u32 phys_base; /**< Mali physical address */
+ mali_io_address mapping;
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+typedef struct mali_memory_allocation
+{
+ /* Information about the allocation */
+ void * mapping; /**< CPU virtual address where the memory is mapped at */
+ u32 mali_address; /**< The Mali seen address of the memory allocation */
+ u32 size; /**< Size of the allocation */
+ u32 permission; /**< Permission settings */
+ mali_memory_allocation_flag flags;
+
+ _mali_osk_lock_t * lock;
+
+ /* Manager specific information pointers */
+ void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+ void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+ mali_physical_memory_allocation physical_allocation;
+
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+typedef struct mali_physical_memory_allocator
+{
+ mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+ mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+ void (*destroy)(struct mali_physical_memory_allocator * allocator);
+ void * ctx;
+ const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+ u32 alloc_order; /**< Order in which the allocations should happen */
+ struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
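+
+/*
+ * Illustrative sketch (kept in #if 0, not compiled): one way a chain of
+ * physical allocators could be wired up in order of preference. The
+ * constructors create_dedicated_allocator() and create_os_allocator() are
+ * hypothetical placeholders; the real driver builds its chain from the
+ * platform configuration.
+ */
+#if 0
+static mali_physical_memory_allocator *build_allocator_chain(void)
+{
+	mali_physical_memory_allocator *dedicated = create_dedicated_allocator();
+	mali_physical_memory_allocator *fallback  = create_os_allocator();
+
+	dedicated->alloc_order = 0;   /* tried first */
+	fallback->alloc_order  = 1;   /* used when the first provider is exhausted */
+	dedicated->next = fallback;
+	fallback->next  = NULL;
+
+	mali_allocation_engine_report_allocators(dedicated);
+	return dedicated;
+}
+#endif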
+
+typedef struct mali_kernel_mem_address_manager
+{
+ _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+ void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+ /**
+ * Function called for each physical sub allocation.
+ * Called for each physical block allocated by the physical memory manager.
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in,out] phys_addr A pointer to the physical address of the start of the
+ * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * the function must allocate the physical page itself and return its address
+ * through the pointer provided.
+ * @param[in] size Length in bytes of the physical block
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+ * does not support allocating physical pages itself.
+ */
+ _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+ /**
+ * Function called to remove a physical sub allocation.
+ * Called on error paths where one of the address managers fails.
+ *
+ * @note this is optional. For address managers where this is not
+ * implemented, the value of this member is NULL. The memory engine
+ * currently does not require the mali address manager to be able to
+ * unmap individual pages, but the process address manager must have this
+ * capability.
+ *
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in] size Length in bytes of the physical block
+ * @param[in] flags Flags to use on a per-page basis. For OS-allocated
+ * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+ */
+ void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
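+
+/*
+ * Illustrative sketch (kept in #if 0, not compiled): the shape of a process
+ * address manager's map_physical callback, honouring the
+ * MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC contract documented
+ * above. my_alloc_page() and my_map_page() are hypothetical placeholders for
+ * the OS-specific implementation.
+ */
+#if 0
+static _mali_osk_errcode_t example_map_physical(mali_memory_allocation *descriptor,
+                                                u32 offset, u32 *phys_addr, u32 size)
+{
+	if (MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr)
+	{
+		/* The caller asks us to choose the page: allocate one and return its address */
+		u32 page = my_alloc_page();
+		if (0 == page) return _MALI_OSK_ERR_NOMEM;
+		*phys_addr = page;
+	}
+
+	/* Map the (now concrete) physical range at 'offset' into the process mapping */
+	return my_map_page(descriptor->mapping, offset, *phys_addr, size);
+}
+#endif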
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+int mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+int mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+int mali_allocation_engine_allocate_page_tables(mali_allocation_engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h
new file mode 100644
index 00000000000..ff1e153678c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PP_H__
+#define __MALI_KERNEL_PP_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_mali200;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only );
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_PP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c
new file mode 100644
index 00000000000..07bb894b1b4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+typedef struct mali_profiling_entry
+{
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+
+typedef enum mali_profiling_state
+{
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+
+static _mali_osk_lock_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;
+static u32 profile_entry_count = 0;
+static _mali_osk_atomic_t profile_insert_index;
+static _mali_osk_atomic_t profile_entries_written;
+
+
+_mali_osk_errcode_t _mali_profiling_init(void)
+{
+ profile_entries = NULL;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+
+ lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_profiling_term(void)
+{
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ /* wait for all elements to be completely inserted into the array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ if (NULL != profile_entries)
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock)
+ {
+ _mali_osk_lock_term(lock);
+ lock = NULL;
+ }
+}
+
+inline _mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
+ {
+ /*
+ * Not in recording mode, or buffer is full
+ * Decrement index again, and early out
+ */
+ _mali_osk_atomic_dec(&profile_insert_index);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ _mali_osk_atomic_inc(&profile_entries_written);
+
+ return _MALI_OSK_ERR_OK;
+}
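+
+/*
+ * Illustrative sketch (kept in #if 0, not compiled): the lock-free pattern
+ * used above. A writer atomically reserves a slot index, fills the entry and
+ * only then bumps the separate "written" counter; readers (stop/term) spin
+ * until the two counters match, so no partially written entry is ever read.
+ * new_entry is a hypothetical, fully initialised mali_profiling_entry.
+ */
+#if 0
+/* writer side */
+u32 slot = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;  /* reserve */
+profile_entries[slot] = new_entry;                                  /* fill    */
+_mali_osk_atomic_inc(&profile_entries_written);                     /* commit  */
+
+/* reader side */
+while (_mali_osk_atomic_read(&profile_insert_index) !=
+       _mali_osk_atomic_read(&profile_entries_written))
+{
+	/* wait for in-flight writers to finish */
+}
+#endif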
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ _mali_osk_errcode_t ret;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_IDLE)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (args->limit > MALI_PROFILING_MAX_BUFFER_ENTRIES)
+ {
+ args->limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ profile_entries = _mali_osk_malloc(args->limit * sizeof(mali_profiling_entry));
+ profile_entry_count = args->limit;
+ if (NULL == profile_entries)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ ret = _mali_timestamp_reset();
+
+ if (ret == _MALI_OSK_ERR_OK)
+ {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ }
+ else
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+ /* Always add the process and thread identifiers in the first two data elements for events from user space */
+ return _mali_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user can now retrieve events); no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* wait for all elements to be completely inserted into the array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ args->count = _mali_osk_atomic_read(&profile_insert_index);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ u32 index = args->index;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= _mali_osk_atomic_read(&profile_entries_written))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ args->timestamp = profile_entries[index].timestamp;
+ args->event_id = profile_entries[index].event_id;
+ args->data[0] = profile_entries[index].data[0];
+ args->data[1] = profile_entries[index].data[1];
+ args->data[2] = profile_entries[index].data[2];
+ args->data[3] = profile_entries[index].data[3];
+ args->data[4] = profile_entries[index].data[4];
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+ if (NULL != profile_entries)
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
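+/*
+ * Illustrative sketch (kept in #if 0, not compiled): the profiling state
+ * machine driven by the entry points above. User space reaches them through
+ * ioctls; the direct calls below only show the expected ordering
+ * IDLE -> RUNNING -> RETURN -> IDLE.
+ */
+#if 0
+u32 i;
+_mali_uk_profiling_start_s start;
+_mali_uk_profiling_stop_s stop;
+_mali_uk_profiling_clear_s clear;
+
+start.limit = 4096;
+_mali_ukk_profiling_start(&start);       /* IDLE    -> RUNNING */
+
+/* ... events are recorded via _mali_profiling_add_event() ... */
+
+_mali_ukk_profiling_stop(&stop);         /* RUNNING -> RETURN  */
+
+for (i = 0; i < stop.count; i++)
+{
+	_mali_uk_profiling_get_event_s ev;
+	ev.index = i;
+	_mali_ukk_profiling_get_event(&ev);  /* read back entry i  */
+}
+
+_mali_ukk_profiling_clear(&clear);       /* RETURN  -> IDLE    */
+#endif
+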
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h
new file mode 100644
index 00000000000..5d3aa6eb841
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PROFILING_H__
+#define __MALI_KERNEL_PROFILING_H__
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+
+#include <../../../include/cinstr/mali_cinstr_profiling_events_m200.h>
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_init(void);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_profiling_term(void);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 - First data parameter, depending on event_id specified.
+ * @param data1 - Second data parameter, depending on event_id specified.
+ * @param data2 - Third data parameter, depending on event_id specified.
+ * @param data3 - Fourth data parameter, depending on event_id specified.
+ * @param data4 - Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+#endif /* __MALI_KERNEL_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c
new file mode 100644
index 00000000000..1db8f3ad5ab
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c
@@ -0,0 +1,2032 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk_list.h"
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h"
+#endif /* USING_MMU */
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif /* USING_MALI400_L2_CACHE */
+
+#define HANG_CHECK_MSECS_MIN 100
+#define HANG_CHECK_MSECS_MAX 2000 /* 2 secs */
+#define HANG_CHECK_MSECS_DEFAULT 500 /* 500 ms */
+
+#define WATCHDOG_MSECS_MIN (2*HANG_CHECK_MSECS_MIN)
+#define WATCHDOG_MSECS_MAX 3600000 /* 1 hour */
+#define WATCHDOG_MSECS_DEFAULT 900000 /* 15 mins */
+
+/* max value that will be converted from jiffies to msecs and written to job->render_time_msecs */
+#define JOB_MAX_JIFFIES 100000
+
+int mali_hang_check_interval = HANG_CHECK_MSECS_DEFAULT;
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+
+/* Subsystem entrypoints: */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id);
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+
+
+static void mali_core_subsystem_cleanup_all_renderunits(struct mali_core_subsystem* subsys);
+static void mali_core_subsystem_move_core_set_idle(struct mali_core_renderunit *core);
+
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem);
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session);
+
+static void find_and_abort(mali_core_session* session, u32 abort_id);
+
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core);
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub);
+#endif
+static void mali_core_subsystem_schedule(mali_core_subsystem*subsystem);
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status);
+
+static void mali_core_renderunit_irq_handler_remove(struct mali_core_renderunit *core);
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data);
+static void mali_core_irq_handler_bottom_half ( void *data );
+
+#if USING_MMU
+static void lock_subsystem(struct mali_core_subsystem * subsys);
+static void unlock_subsystem(struct mali_core_subsystem * subsys);
+#endif
+
+
+/**
+ * This will be one of the subsystems in the array of subsystems:
+ * static struct mali_kernel_subsystem * subsystems[];
+ * found in file: mali_kernel_core.c
+ *
+ * This subsystem is necessary for operations common to all rendercore
+ * subsystems. For example, mali_subsystem_mali200 and mali_subsystem_gp2 may
+ * share a mutex when RENDERCORES_USE_GLOBAL_MUTEX is non-zero.
+ */
+struct mali_kernel_subsystem mali_subsystem_rendercore=
+{
+ rendercore_subsystem_startup, /* startup */
+ rendercore_subsystem_terminate, /* shutdown */
+ NULL, /* load_complete */
+ NULL, /* system_info_fill */
+ NULL, /* session_begin */
+ NULL, /* session_end */
+#if USING_MMU
+ rendercore_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+} ;
+
+static _mali_osk_lock_t *rendercores_global_mutex = NULL;
+static u32 rendercores_global_mutex_is_held = 0;
+static u32 rendercores_global_mutex_owner = 0;
+
+/** The 'dummy' rendercore subsystem, used so that the global subsystem mutex
+ * can be locked on behalf of all subsystems that extend the ''rendercore'' */
+static mali_core_subsystem rendercore_dummy_subsystem = {0,};
+
+/*
+ * Rendercore Subsystem functions.
+ *
+ * These are exposed by mali_subsystem_rendercore
+ */
+
+/**
+ * @brief Initialize the Rendercore subsystem.
+ *
+ * This must be called before any other subsystem that extends the
+ * ''rendercore'' may be initialized. For example, this must be called before
+ * the following functions:
+ * - mali200_subsystem_startup(), from mali_subsystem_mali200
+ * - maligp_subsystem_startup(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_init(). They
+ * are related, in that mali_core_subsystem_init() may use the structures
+ * initialized by rendercore_subsystem_startup()
+ */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ rendercores_global_mutex_is_held = 0;
+ rendercores_global_mutex = _mali_osk_lock_init(
+ (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED),
+ 0, 129);
+
+ if (NULL == rendercores_global_mutex)
+ {
+ MALI_PRINT_ERROR(("Failed: _mali_osk_lock_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ rendercore_dummy_subsystem.name = "Rendercore Global Subsystem"; /* On the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = SUBSYSTEM_MAGIC_NR; /* To please the Subsystem Mutex code */
+
+#if MALI_GPU_UTILIZATION
+ if (mali_utilization_init() != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_lock_term(rendercores_global_mutex);
+ rendercores_global_mutex = NULL;
+ MALI_PRINT_ERROR(("Failed: mali_utilization_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ if (_mali_profiling_init() != _MALI_OSK_ERR_OK)
+ {
+ /* Not fatal if we weren't able to initialize profiling */
+ MALI_PRINT_ERROR(("Rendercore: Failed to initialize profiling, feature will be unavailable\n")) ;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex initialized\n")) ;
+ MALI_SUCCESS;
+}
+
+/**
+ * @brief Terminate the Rendercore subsystem.
+ *
+ * This must only be called \b after any other subsystem that extends the
+ * ''rendercore'' has been terminated. For example, this must be called \b after
+ * the following functions:
+ * - mali200_subsystem_terminate(), from mali_subsystem_mali200
+ * - maligp_subsystem_terminate(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_cleanup(), though,
+ * the subsystems that extend ''rendercore'' must still call
+ * mali_core_subsystem_cleanup() when they terminate.
+ */
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ /* Catch double-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_term();
+#endif
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_term();
+#endif
+
+ rendercore_dummy_subsystem.name = NULL; /* The original string was on the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = 0;
+
+ /* ASSERT that no-one's holding this */
+ MALI_DEBUG_PRINT_ASSERT( 0 == rendercores_global_mutex_is_held,
+ ("Rendercores' Global Mutex was held at termination time. Have the subsystems that extend ''rendercore'' been terminated?\n") );
+
+ _mali_osk_lock_term( rendercores_global_mutex );
+ rendercores_global_mutex = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex terminated\n")) ;
+}
+
+
+#if USING_MMU
+/**
+ * @brief Handle certain Rendercore subsystem broadcast notifications
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is non-zero, this handles the following messages:
+ * - MMU_KILL_STEP0_LOCK_SUBSYSTEM
+ * - MMU_KILL_STEP4_UNLOCK_SUBSYSTEM
+ *
+ * The purpose is to manage the Rendercore Global Mutex, which cannot be
+ * managed by any subsystem that extends the ''rendercore''.
+ *
+ * All other messages must be handled by mali_core_subsystem_broadcast_notification()
+ *
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is 0, this function does nothing.
+ * Instead, the subsystem that extends the ''rendercore'' \b must handle its
+ * own mutexes - refer to mali_core_subsystem_broadcast_notification().
+ *
+ * Currently used only for signalling when the MMU has a page fault.
+ */
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ lock_subsystem( &rendercore_dummy_subsystem );
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ unlock_subsystem( &rendercore_dummy_subsystem );
+ break;
+
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+
+}
+#endif
+
+/*
+ * Functions inherited by the subsystems that extend the ''rendercore''.
+ */
+
+u32 mali_core_renderunit_register_read(mali_core_renderunit *core, u32 relative_address)
+{
+ u32 read_val;
+
+ #if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during read: Core:%s Addr:0x%04X\n",
+ core->description,relative_address));
+ return 0xDEADBEEF;
+ }
+ #endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) return 0;
+
+ if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to read from illegal register: 0x%04x in core: %s\n",
+ relative_address, core->description));
+ return 0xDEADBEEF;
+ }
+
+ read_val = _mali_osk_mem_ioread32(core->registers_mapped, relative_address);
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, read_val));
+
+ return read_val;
+}
+
+void mali_core_renderunit_register_read_array(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * result_array,
+ u32 nr_of_regs
+ )
+{
+ /* NOTE Do not use burst reads against the registers */
+
+ u32 i;
+
+ for(i=0; i<nr_of_regs; ++i)
+ {
+ result_array[i] = mali_core_renderunit_register_read(core, relative_address + i*4);
+ }
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+}
+
+void mali_core_renderunit_register_write(mali_core_renderunit *core, u32 relative_address, u32 new_val)
+{
+ #if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+ return;
+ }
+ #endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) return;
+
+ if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to write to illegal register: 0x%04x in core: %s",
+ relative_address, core->description));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("mali_core_renderunit_register_write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+
+ _mali_osk_mem_iowrite32(core->registers_mapped, relative_address, new_val);
+}
+
+
+void mali_core_renderunit_register_write_array(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * source_array,
+ u32 nr_of_regs)
+{
+
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_write_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+
+ for( i = 0; i< nr_of_regs; i++)
+ {
+ mali_core_renderunit_register_write(core, relative_address + i*4, source_array[i]);
+ }
+}
+
+void mali_core_renderunit_timeout_function_hang_detection(void *arg)
+{
+ mali_bool action = MALI_FALSE;
+ mali_core_renderunit * core;
+
+ core = (mali_core_renderunit *) arg;
+ if( !core ) return;
+
+ /* if NOT idle OR has TIMED_OUT */
+ if ( !((CORE_WATCHDOG_TIMEOUT == core->state ) || (CORE_IDLE== core->state)) )
+ {
+ core->state = CORE_HANG_CHECK_TIMEOUT;
+ action = MALI_TRUE;
+ }
+
+ if(action) _mali_osk_irq_schedulework(core->irq);
+}
+
+
+void mali_core_renderunit_timeout_function(void *arg)
+{
+ mali_core_renderunit * core;
+ mali_bool is_watchdog;
+
+ core = (mali_core_renderunit *)arg;
+ if( !core ) return;
+
+ is_watchdog = MALI_TRUE;
+ if (mali_benchmark)
+ {
+ /* poll based core */
+ mali_core_job *job;
+ job = core->current_job;
+ if ( (NULL != job) &&
+ (0 != _mali_osk_time_after(job->watchdog_jiffies,_mali_osk_time_tickcount()))
+ )
+ {
+ core->state = CORE_POLL;
+ is_watchdog = MALI_FALSE;
+ }
+ }
+
+ if (is_watchdog)
+ {
+ MALI_DEBUG_PRINT(3, ("SW-Watchdog timeout: Core:%s\n", core->description));
+ core->state = CORE_WATCHDOG_TIMEOUT;
+ }
+
+ _mali_osk_irq_schedulework(core->irq);
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_init(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_init: Core:%s\n", core->description));
+
+ _MALI_OSK_INIT_LIST_HEAD(&core->list) ;
+ core->timer = _mali_osk_timer_init();
+ if (NULL == core->timer)
+ {
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer, mali_core_renderunit_timeout_function, (void *)core);
+
+ core->timer_hang_detection = _mali_osk_timer_init();
+ if (NULL == core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init hang detection timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer_hang_detection, mali_core_renderunit_timeout_function_hang_detection, (void *)core);
+
+#if USING_MALI_PMM
+ /* Init no pending power downs */
+ core->pend_power_down = MALI_FALSE;
+
+ /* Register the core with the PMM - which powers it up */
+ if (_MALI_OSK_ERR_OK != malipmm_core_register( core->pmm_id ))
+ {
+ _mali_osk_timer_term(core->timer);
+ _mali_osk_timer_term(core->timer_hang_detection);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot register with PMM\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif /* USING_MALI_PMM */
+
+ core->error_recovery = MALI_FALSE;
+ core->in_detach_function = MALI_FALSE;
+ core->state = CORE_IDLE;
+ core->current_job = NULL;
+ core->magic_nr = CORE_MAGIC_NR;
+#if USING_MMU
+ core->mmu = NULL;
+#endif /* USING_MMU */
+
+ MALI_SUCCESS;
+}
+
+void mali_core_renderunit_term(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_term: Core:%s\n", core->description));
+
+ if (NULL != core->timer)
+ {
+ _mali_osk_timer_term(core->timer);
+ core->timer = NULL;
+ }
+ if (NULL != core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer_hang_detection);
+ core->timer_hang_detection = NULL;
+ }
+
+#if USING_MALI_PMM
+ /* Unregister the core with the PMM */
+ malipmm_core_unregister( core->pmm_id );
+#endif
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_map_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_map_registers: Core:%s\n", core->description)) ;
+ if( (0 == core->registers_base_addr) ||
+ (0 == core->size) ||
+ (NULL == core->description)
+ )
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure %u %u 0x%x;\n", core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(core->registers_base_addr, core->size, core->description))
+ {
+ MALI_PRINT_ERROR(("Could not request register region (0x%08X - 0x%08X) to core: %s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: request_mem_region: (0x%08X - 0x%08X) Core:%s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ }
+
+ core->registers_mapped = _mali_osk_mem_mapioregion( core->registers_base_addr, core->size, core->description );
+
+ if ( 0 == core->registers_mapped )
+ {
+ MALI_PRINT_ERROR(("Could not ioremap registers for %s .\n", core->description));
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) Core:%s\n",
+ (u32) core->registers_mapped,
+ ((u32)core->registers_mapped)+ core->size - 1,
+ core->description));
+ }
+
+ MALI_DEBUG_PRINT(4, ("Success: Mapping registers to core: %s\n",core->description));
+
+ MALI_SUCCESS;
+}
+
+/* Used by external renderunit_create<> function + other places */
+void mali_core_renderunit_unmap_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_unmap_registers: Core:%s\n", core->description));
+ if (0 == core->registers_mapped)
+ {
+ MALI_PRINT_ERROR(("Trying to unmap register-mapping with NULL from core: %s\n", core->description));
+ return;
+ }
+ _mali_osk_mem_unmapioregion(core->registers_base_addr, core->size, core->registers_mapped);
+ core->registers_mapped = 0;
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+}
+
+static void mali_core_renderunit_irq_handler_remove(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_irq_handler_remove: Core:%s\n", core->description));
+ _mali_osk_irq_term(core->irq);
+}
+
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr)
+{
+ mali_core_renderunit * core;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ if (subsys->number_of_cores <= mali_core_nr)
+ {
+ MALI_PRINT_ERROR(("Trying to get illegal mali_core_nr: 0x%x for %s", mali_core_nr, subsys->name));
+ return NULL;
+ }
+ core = (subsys->mali_core_array)[mali_core_nr];
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_get_mali_core_nr: Core:%s\n", core->description));
+ MALI_CHECK_CORE(core);
+ return core;
+}
+
+/* Is used by external function:
+ subsystem_startup<> */
+_mali_osk_errcode_t mali_core_subsystem_init(mali_core_subsystem* new_subsys)
+{
+ int i;
+
+ /* These function pointers must have been set on before calling this function */
+ if (
+ ( NULL == new_subsys->name ) ||
+ ( NULL == new_subsys->start_job ) ||
+ ( NULL == new_subsys->irq_handler_upper_half ) ||
+ ( NULL == new_subsys->irq_handler_bottom_half ) ||
+ ( NULL == new_subsys->get_new_job_from_user ) ||
+ ( NULL == new_subsys->return_job_to_user )
+ )
+ {
+ MALI_PRINT_ERROR(("Missing functions in subsystem."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_init: %s\n", new_subsys->name)) ;
+
+ /* Catch use-before-initialize/use-after-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+ new_subsys->magic_nr = SUBSYSTEM_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_idle_head); /* Idle cores of this type */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_off_head); /* Powered off cores of this type */
+
+ /* One linked list per priority level, of sessions with a job ready for scheduling */
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->awaiting_sessions_head[i]);
+ }
+
+ /* Linked list of all sessions connected to this coretype */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->all_sessions_head);
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core ) break;
+ core->mmu = mali_memory_core_mmu_lookup(core->mmu_id);
+ MALI_DEBUG_PRINT(2, ("Attach mmu: 0x%x to core: %s in subsystem: %s\n", core->mmu, core->description, subsys->name));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+#endif
+
+/* This will register an IRQ handler, and add the core to the list of available cores for this subsystem. */
+_mali_osk_errcode_t mali_core_subsystem_register_renderunit(mali_core_subsystem* subsys, mali_core_renderunit * core)
+{
+ mali_core_renderunit ** mali_core_array;
+ u32 previous_nr;
+ u32 previous_size;
+ u32 new_nr;
+ u32 new_size;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* If any of these are 0 there is an error */
+ if(0 == core->subsystem ||
+ 0 == core->registers_base_addr ||
+ 0 == core->size ||
+ 0 == core->description)
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure 0x%x 0x%x 0x%x;\n",
+ core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Core: subsystem_register_renderunit: %s\n", core->description));
+
+ MALI_CHECK_NON_NULL(
+ core->irq = _mali_osk_irq_init(
+ core->irq_nr,
+ mali_core_irq_handler_upper_half,
+ mali_core_irq_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)subsys->probe_core_irq_trigger,
+ (_mali_osk_irq_ack_t)subsys->probe_core_irq_acknowledge,
+ core,
+ "mali_core_irq_handlers"
+ ),
+ _MALI_OSK_ERR_FAULT
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* Update which core number this is */
+ core->core_number = subsys->number_of_cores;
+
+ /* Update the array of cores in the subsystem. */
+ previous_nr = subsys->number_of_cores;
+ previous_size = sizeof(mali_core_renderunit*)*previous_nr;
+ new_nr = previous_nr + 1;
+ new_size = sizeof(mali_core_renderunit*)*new_nr;
+
+ if (0 != previous_nr)
+ {
+ if (NULL == subsys->mali_core_array)
+ {
+ MALI_PRINT_ERROR(("Internal error"));
+ goto exit_function;
+ }
+
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ _mali_osk_memcpy(mali_core_array, subsys->mali_core_array, previous_size);
+ _mali_osk_free( subsys->mali_core_array);
+ MALI_DEBUG_PRINT(5, ("Success: adding a new core to subsystem array %s\n", core->description) ) ;
+ }
+ else
+ {
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ MALI_DEBUG_PRINT(6, ("Success: adding first core to subsystem array %s\n", core->description) ) ;
+ }
+ subsys->mali_core_array = mali_core_array;
+ mali_core_array[previous_nr] = core;
+
+ /* Add the core to the list of available cores on the system */
+ _mali_osk_list_add(&(core->list), &(subsys->renderunit_idle_head));
+
+ /* Update total number of cores */
+ subsys->number_of_cores = new_nr;
+ MALI_DEBUG_PRINT(6, ("Success: mali_core_subsystem_register_renderunit %s\n", core->description));
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_SUCCESS;
+
+exit_function:
+ mali_core_renderunit_irq_handler_remove(core);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
+
+
+/**
+ * Called by the core when a system info update is needed
+ * We fill in info about all the core types available
+ * @param subsys Pointer to the core's @a mali_core_subsystem data structure
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t error code on failure
+ */
+_mali_osk_errcode_t mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info)
+{
+ u32 i;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; /* OK if no cores to update info for */
+ mali_core_renderunit * core;
+ _mali_core_info **core_info_nextp;
+ _mali_core_info * cinfo;
+
+ MALI_DEBUG_PRINT(4, ("mali_core_subsystem_system_info_fill: %s\n", subsys->name) ) ;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ core_info_nextp = &(info->core_info);
+ cinfo = info->core_info;
+
+ while(NULL!=cinfo)
+ {
+ core_info_nextp = &(cinfo->next);
+ cinfo = cinfo->next;
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core )
+ {
+ err = _MALI_OSK_ERR_FAULT;
+ goto early_exit;
+ }
+ cinfo = (_mali_core_info *)_mali_osk_calloc(1, sizeof(_mali_core_info));
+ if ( NULL==cinfo )
+ {
+ err = _MALI_OSK_ERR_NOMEM;
+ goto early_exit;
+ }
+ cinfo->version = core->core_version;
+ cinfo->type =subsys->core_type;
+ cinfo->reg_address = core->registers_base_addr;
+ cinfo->core_nr = i;
+ cinfo->next = NULL;
+ /* Writing this address to the previous' *(&next) ptr */
+ *core_info_nextp = cinfo;
+ /* Setting the next_ptr to point to &this->next_ptr */
+ core_info_nextp = &(cinfo->next);
+ }
+early_exit:
+ if ( _MALI_OSK_ERR_OK != err) MALI_PRINT_ERROR(("Error: In mali_core_subsystem_system_info_fill %d\n", err));
+ MALI_DEBUG_CODE(
+ cinfo = info->core_info;
+
+ MALI_DEBUG_PRINT(3, ("Current list of cores\n"));
+ while( NULL != cinfo )
+ {
+ MALI_DEBUG_PRINT(3, ("Type: 0x%x\n", cinfo->type));
+ MALI_DEBUG_PRINT(3, ("Version: 0x%x\n", cinfo->version));
+ MALI_DEBUG_PRINT(3, ("Reg_addr: 0x%x\n", cinfo->reg_address));
+ MALI_DEBUG_PRINT(3, ("Core_nr: 0x%x\n", cinfo->core_nr));
+ MALI_DEBUG_PRINT(3, ("Flags: 0x%x\n", cinfo->flags));
+ MALI_DEBUG_PRINT(3, ("*****\n"));
+ cinfo = cinfo->next;
+ }
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
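+
+/*
+ * Illustrative sketch (kept in #if 0, not compiled): the "pointer to
+ * next-pointer" append idiom used above to extend the core_info list without
+ * keeping a separate tail variable. new_entry is a hypothetical, already
+ * allocated _mali_core_info with next == NULL.
+ */
+#if 0
+_mali_core_info **nextp = &info->core_info;
+while (NULL != *nextp) nextp = &(*nextp)->next;  /* walk to the terminating link */
+*nextp = new_entry;                              /* hook the new element in      */
+nextp  = &new_entry->next;                       /* ready for the next append    */
+#endif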
+
+
+/* Is used by external function:
+ subsystem_terminate<> */
+void mali_core_subsystem_cleanup(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_cleanup: %s\n", subsys->name )) ;
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+#if USING_MMU
+ if (NULL != core->mmu)
+ {
+ /* the MMU is attached in the load_complete callback, which will never be called if the module fails to load, handle that case */
+ mali_memory_core_mmu_unregister_callback(core->mmu, mali_core_subsystem_callback_schedule_wrapper);
+ }
+#endif
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ mali_core_renderunit_irq_handler_remove(core);
+
+ /* When a process terminates, all cores running jobs from that process are reset and put to idle.
+ That means that when the module is unloading (this code) we are guaranteed that all cores are idle.
+ However, if something unexpected goes really wrong, a core may still raise an interrupt during this
+ unloading, leaving a bottom-half work item pending from the interrupt handlers we deregistered above.
+ To make sure the bottom halves do not access the structures after they are deallocated, we flush
+ the pending bottom-half processing here, before the deallocation. */
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+#if USING_MALI_PMM
+ /* Only reset when we are using PMM and the core is not off */
+#if MALI_PMM_NO_PMU
+ /* We need to reset when there is no PMU - but this would
+ * cause the register read/write functions to report an
+ * error (hence the check for CORE_OFF below), so we
+ * change state to allow the reset to happen.
+ */
+ core->state = CORE_IDLE;
+#endif
+ if( core->state != CORE_OFF )
+ {
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+ }
+#else
+ /* Always reset the core */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+#endif
+
+ mali_core_renderunit_unmap_registers(core);
+
+ _mali_osk_list_delinit(&core->list);
+
+ mali_core_renderunit_term(core);
+
+ subsys->renderunit_delete(core);
+ }
+
+ mali_core_subsystem_cleanup_all_renderunits(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT(6, ("SUCCESS: mali_core_subsystem_cleanup: %s\n", subsys->name )) ;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+ if ( NULL != number_of_cores )
+ {
+ *number_of_cores = subsystem->number_of_cores;
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_number_of_cores_get: %s: %u\n", subsystem->name, *number_of_cores) ) ;
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ err = subsystem->get_new_job_from_user(session, job_data);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_ERROR(err);
+}
+
+
+/* We return the version number to the first core in this subsystem */
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit * core0;
+ u32 nr_return;
+
+ subsystem = session->subsystem;
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+
+ core0 = mali_core_renderunit_get_mali_core_nr(subsystem, 0);
+
+ if( NULL == core0 )
+ {
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ nr_return = core0->core_version;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_core_version_get: %s: %u\n", subsystem->name, nr_return )) ;
+
+ *version = nr_return;
+
+ MALI_SUCCESS;
+}
+
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id)
+{
+ find_and_abort(session, id);
+}
+
+static mali_bool job_should_be_aborted(mali_core_job *job, u32 abort_id)
+{
+ if ( job->abort_id == abort_id ) return MALI_TRUE;
+ else return MALI_FALSE;
+}
+
+static void find_and_abort(mali_core_session* session, u32 abort_id)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp;
+ mali_core_job *job;
+
+ subsystem = session->subsystem;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ job = session->job_waiting_to_run;
+ if ( (job!=NULL) && job_should_be_aborted (job, abort_id) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, from the waiting_to_run slot.\n", subsystem->name, abort_id ));
+ session->job_waiting_to_run = NULL;
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ subsystem->return_job_to_user( job , JOB_STATUS_END_ABORT);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY( core, tmp, &session->renderunits_working_head, mali_core_renderunit, list )
+ {
+ job = core->current_job;
+ if ( (job!=NULL) && (job_should_be_aborted (job, abort_id) ) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, which is currently running on mali.\n", subsystem->name, abort_id ));
+ if ( core->state==CORE_IDLE )
+ {
+ MALI_PRINT_ERROR(("Aborting core with running job which is idle. Must be something very wrong."));
+ goto end_bug;
+ }
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_ABORT);
+ }
+ }
+end_bug:
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void *argument)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ if ( NULL != subsystem->suspend_response)
+ {
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE start\n"));
+ err = subsystem->suspend_response(session, argument);
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE end\n"));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ return err;
+}
+
+
+/* Is used by internal function:
+ mali_core_subsystem_cleanup<> */
+/* All cores should be removed before calling this function
+Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_cleanup_all_renderunits(mali_core_subsystem* subsys)
+{
+ int i;
+ _mali_osk_free(subsys->mali_core_array);
+ subsys->number_of_cores = 0;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_cleanup_all_renderunits: %s\n", subsys->name) ) ;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_idle_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_idle should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_idle_head)) ;
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_off_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_off should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_off_head)) ;
+ }
+
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ if ( ! _mali_osk_list_empty(&(subsys->awaiting_sessions_head[i])))
+ {
+ MALI_PRINT_ERROR(("List awaiting_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->awaiting_sessions_head[i])) ;
+ subsys->awaiting_sessions_sum_all_priorities = 0;
+ }
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->all_sessions_head)))
+ {
+ MALI_PRINT_ERROR(("List all_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->all_sessions_head)) ;
+ }
+}
+
+/* Is used by internal functions:
+ mali_core_irq_handler_bottom_half<>;
+ mali_core_subsystem_schedule<>; */
+/* Will release the core.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_idle(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+#if USING_MALI_PMM
+ mali_core_status oldstatus;
+#endif
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ _mali_osk_timer_del(core->timer);
+ _mali_osk_timer_del(core->timer_hang_detection);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_idle: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+
+#if USING_MALI_PMM
+
+ oldstatus = core->state;
+
+ if( core->pend_power_down )
+ {
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+ /* The move off the active queues is done, so the pending power down can proceed */
+ core->pend_power_down = MALI_FALSE;
+ malipmm_core_power_down_okay( core->pmm_id );
+ }
+ else
+ {
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+ }
+
+ if( CORE_OFF != oldstatus )
+ {
+ /* Message that this core is now idle or in fact off */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_FINISHED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+#if USING_MMU
+ /* Only free the reference when entering idle state from
+ * anything other than power off
+ */
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif /* USING_MMU */
+ }
+
+
+#else /* !USING_MALI_PMM */
+
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+
+#if USING_MMU
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif
+
+#endif /* USING_MALI_PMM */
+}
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_set_working(mali_core_renderunit *core, mali_core_job *job)
+{
+ mali_core_subsystem *subsystem;
+ mali_core_session *session;
+
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_set_working: %s\n", core->description) ) ;
+
+ core->current_job = job ;
+ core->state = CORE_WORKING ;
+ job->start_time_jiffies = _mali_osk_time_tickcount();
+ _mali_osk_list_move( &core->list, &session->renderunits_working_head );
+
+}
+
+#if USING_MALI_PMM
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_off(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ /* Cores must be idle before powering off */
+ MALI_DEBUG_ASSERT(core->state == CORE_IDLE);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_off: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+}
+
+#endif /* USING_MALI_PMM */
+
+/* Is used by internal function:
+ mali_core_subsystem_schedule<>; */
+/* Returns the session with the highest priority job for the subsystem. NULL if none*/
+/* Must hold subsystem_mutex before entering this function */
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem)
+{
+ int i;
+
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities )
+ {
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_get_waiting_session: No awaiting session found\n"));
+ return NULL;
+ }
+
+ for( i=0; i<PRIORITY_LEVELS ; ++i)
+ {
+ if (!_mali_osk_list_empty(&subsystem->awaiting_sessions_head[i]))
+ {
+ return _MALI_OSK_LIST_ENTRY(subsystem->awaiting_sessions_head[i].next, mali_core_session, awaiting_sessions_list);
+ }
+ }
+
+ return NULL;
+}
+
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session)
+{
+ mali_core_job *job;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ _mali_osk_list_delinit(&session->awaiting_sessions_list);
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ job = session->job_waiting_to_run;
+ session->job_waiting_to_run = NULL;
+ MALI_CHECK_JOB(job);
+ return job;
+}
+
+/* Is used by internal functions:
+ mali_core_subsystem_schedule<> */
+/* This will start the job on the core. It will also release the core if it did not start.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core)
+{
+ mali_core_session *session;
+ mali_core_subsystem *subsystem;
+ _mali_osk_errcode_t err;
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_CHECK_SESSION(session);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: job_start_on_core: job=0x%x, session=0x%x, core=%s\n", job, session, core->description));
+
+ MALI_DEBUG_ASSERT(NULL == core->current_job) ;
+ MALI_DEBUG_ASSERT(CORE_IDLE == core->state );
+
+ mali_core_subsystem_move_set_working(core, job);
+
+#if defined USING_MALI400_L2_CACHE
+ /* Invalidate the L2 cache */
+ if (_MALI_OSK_ERR_OK != mali_kernel_l2_cache_invalidate_all() )
+ {
+ MALI_DEBUG_PRINT(4, ("Core: L2 cache invalidate failed, returning job. System may not be usable for some reason.\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_SYSTEM_UNUSABLE );
+ return;
+ }
+#endif
+
+ /* Try to start the job on the core. Returns an error code if the job could not be started */
+ err = subsystem->start_job(job, core);
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_core_start();
+#endif
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ /* This will happen only if there is something in the job object
+ which makes it impossible to start, e.g. if it requires access to illegal memory.*/
+ MALI_DEBUG_PRINT(4, ("Core: start_job failed, return job and putting core back into idle list\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_ILLEGAL_JOB );
+ }
+ else
+ {
+ u32 delay = _mali_osk_time_mstoticks(job->watchdog_msecs)+1;
+ job->watchdog_jiffies = _mali_osk_time_tickcount() + delay;
+ if (mali_benchmark)
+ {
+ _mali_osk_timer_add(core->timer, 1);
+ }
+ else
+ {
+ _mali_osk_timer_add(core->timer, delay);
+ }
+ }
+}
+
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub)
+{
+ mali_core_subsystem * subsystem;
+ subsystem = (mali_core_subsystem *)sub;
+ MALI_DEBUG_PRINT(3, ("MMU: Scheduling subsystem: %s\n", subsystem->name));
+ mali_core_subsystem_schedule(subsystem);
+}
+#endif
+
+/* Is used by internal function:
+ mali_core_irq_handler_bottom_half
+ mali_core_session_add_job
+*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_schedule(mali_core_subsystem * subsystem)
+{
+ mali_core_renderunit *core, *tmp;
+ mali_core_session *session;
+ mali_core_job *job;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_schedule: %s\n", subsystem->name )) ;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ /* First check that there are sessions with jobs waiting to run */
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: No jobs available for %s\n", subsystem->name) ) ;
+ return;
+ }
+
+ /* Returns the session with the highest priority job for the subsystem. NULL if none*/
+ session = mali_core_subsystem_get_waiting_session(subsystem);
+
+ if (NULL == session)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: No runnable job found\n"));
+ return;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+#if USING_MMU
+ int err = mali_memory_core_mmu_activate_page_table(core->mmu, session->mmu_session, mali_core_subsystem_callback_schedule_wrapper, subsystem);
+ if (0 == err)
+ {
+ /* core points to a core where the MMU page table activation succeeded */
+#endif
+ /* This will remove the job from queue system */
+ job = mali_core_subsystem_release_session_get_job(subsystem, session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Got a job 0x%x\n", job));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there is a job scheduled to run
+ * NOTE: mali_core_job_start_on_core() can fail to start
+ * the job for several reasons, but it will move the core
+ * back to idle which will create the FINISHED message
+ * so we can still say that the job is SCHEDULED
+ */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_SCHEDULED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif
+ /* This will {remove core from freelist AND start the job on the core}*/
+ mali_core_job_start_on_core(job, core);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Job started, done\n"));
+ return;
+#if USING_MMU
+ }
+#endif
+ }
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Could not activate MMU. Scheduling postponed to MMU, checking next.\n"));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there are jobs to run */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_QUEUED,
+ 0 };
+ if( subsystem->core_type == _MALI_GP2 || subsystem->core_type == _MALI_400_GP )
+ {
+ event.data = MALI_PMM_CORE_GP;
+ }
+ else
+ {
+ /* Check the PP is supported by the PMM */
+ MALI_DEBUG_ASSERT( subsystem->core_type == _MALI_200 || subsystem->core_type == _MALI_400_PP );
+ /* We state that all PP cores are scheduled to inform the PMM
+ * that it may need to power something up!
+ */
+ event.data = MALI_PMM_CORE_PP_ALL;
+ }
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif /* USING_MALI_PMM */
+
+}
+
+/* Is used by external function:
+ session_begin<> */
+void mali_core_session_begin(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+ if ( NULL == subsystem )
+ {
+ MALI_PRINT_ERROR(("Missing data in struct\n"));
+ return;
+ }
+ MALI_DEBUG_PRINT(2, ("Core: session_begin: for %s\n", session->subsystem->name )) ;
+
+ session->magic_nr = SESSION_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head);
+
+ session->job_waiting_to_run = NULL;
+ _MALI_OSK_INIT_LIST_HEAD(&session->awaiting_sessions_list);
+ _MALI_OSK_INIT_LIST_HEAD(&session->all_sessions_list);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ _mali_osk_list_add(&session->all_sessions_list, &session->subsystem->all_sessions_head);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_begin: for %s DONE\n", session->subsystem->name) ) ;
+}
+
+#if USING_MMU
+static void mali_core_renderunit_stop_bus(mali_core_renderunit* core)
+{
+ core->subsystem->stop_bus(core);
+}
+#endif
+
+void mali_core_session_close(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+
+ subsystem = session->subsystem;
+ MALI_DEBUG_ASSERT_POINTER(subsystem);
+
+ MALI_DEBUG_PRINT(2, ("Core: session_close: for %s\n", session->subsystem->name) ) ;
+
+ /* We must grab subsystem mutex since the list this session belongs to
+ is owned by the subsystem */
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ /* Return the potential waiting job to the user */
+ if ( session->job_waiting_to_run )
+ {
+ subsystem->return_job_to_user( session->job_waiting_to_run, JOB_STATUS_END_SHUTDOWN );
+ session->job_waiting_to_run = NULL;
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ }
+
+ /* Kill active cores working for this session, freeing their jobs.
+ Since handling one core could also stop jobs on another core, a while loop is used */
+ while ( ! _mali_osk_list_empty(&session->renderunits_working_head) )
+ {
+ core = _MALI_OSK_LIST_ENTRY(session->renderunits_working_head.next, mali_core_renderunit, list);
+ MALI_DEBUG_PRINT(3, ("Core: session_close: Core was working: %s\n", core->description )) ;
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_SHUTDOWN );
+ break;
+ }
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head); /* Not necessary - we will _mali_osk_free session*/
+
+ /* Remove this session from the global sessionlist */
+ _mali_osk_list_delinit(&session->all_sessions_list);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_close: for %s FINISHED\n", session->subsystem->name )) ;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+/* Must hold subsystem_mutex before entering this function */
+_mali_osk_errcode_t mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return)
+{
+ mali_core_subsystem * subsystem;
+
+ job->magic_nr = JOB_MAGIC_NR;
+ MALI_CHECK_SESSION(session);
+
+ subsystem = session->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_add_job: for %s\n", subsystem->name )) ;
+
+ /* Setting the default value; No job to return */
+ MALI_DEBUG_ASSERT_POINTER(job_return);
+ *job_return = NULL;
+
+ if ( NULL != session->job_waiting_to_run)
+ {
+ MALI_DEBUG_PRINT(5, ("The session already had a job waiting\n")) ;
+ /* Checking whether the new job has a higher priority than the one that was pending.*/
+ if ( job_has_higher_priority(job,session->job_waiting_to_run))
+ {
+ /* Remove this session from current priority */
+ _mali_osk_list_del( &(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ /* Returning the previous waiting job through the input double pointer*/
+ *job_return = session->job_waiting_to_run;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Illegal internal state."));
+ /* There was a job waiting in this session, and the priority of the job
+ we tried to add was NOT higher. Return an error to indicate that the new job was NOT enqueued.*/
+ /* We check prior to calling this function that we are not in this state.*/
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ /* Continue to add the new job as the next job from this session */
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job job=0x%x\n", job));
+
+ /* Adding this session to the subsystem list of sessions with pending job, with priority */
+ session->job_waiting_to_run = job;
+
+ _mali_osk_list_addtail( &(session->awaiting_sessions_list), &(subsystem->awaiting_sessions_head[job->priority]));
+ subsystem->awaiting_sessions_sum_all_priorities++;
+
+ mali_core_subsystem_schedule(subsystem);
+
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job: for %s FINISHED\n", session->subsystem->name )) ;
+
+ MALI_SUCCESS;
+}
+
+static void mali_core_job_set_run_time(mali_core_job * job)
+{
+ u32 jiffies_used;
+ jiffies_used = _mali_osk_time_tickcount() - job->start_time_jiffies;
+ if ( jiffies_used > JOB_MAX_JIFFIES )
+ {
+ MALI_PRINT_ERROR(("Job used too many jiffies: %d\n", jiffies_used ));
+ jiffies_used = 0;
+ }
+ job->render_time_msecs = _mali_osk_time_tickstoms(jiffies_used);
+}
+
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status)
+{
+ mali_core_job * job;
+ mali_core_subsystem * subsystem;
+ mali_bool already_in_detach_function;
+
+ job = core->current_job;
+ subsystem = core->subsystem;
+ MALI_DEBUG_ASSERT(CORE_IDLE != core->state);
+
+ /* The reset_core() called some lines below might call this detach
+ * function again. To protect the core object from being modified by
+ * recursive calls, in_detach_function tracks whether this is a recursive call
+ */
+ already_in_detach_function = core->in_detach_function;
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ core->in_detach_function = MALI_TRUE;
+ if ( NULL != job )
+ {
+ mali_core_job_set_run_time(job);
+ core->current_job = NULL;
+ }
+ }
+
+ if (JOB_STATUS_END_SEG_FAULT == end_status)
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_HARD );
+ }
+ else
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ if ( CORE_IDLE != core->state )
+ {
+ #if MALI_GPU_UTILIZATION
+ mali_utilization_core_end();
+ #endif
+ mali_core_subsystem_move_core_set_idle(core);
+ }
+
+ core->in_detach_function = MALI_FALSE;
+
+ if ( SUBSYSTEM_RESCHEDULE == reschedule )
+ {
+ mali_core_subsystem_schedule(subsystem);
+ }
+ if ( NULL != job )
+ {
+ core->subsystem->return_job_to_user(job, end_status);
+ }
+ }
+}
+
+#if USING_MMU
+/* This function intentionally does not release the mutex. You must run
+ stop_bus_for_all_cores_on_mmu(), reset_all_cores_on_mmu() and continue_job_handling()
+ after calling this function, and then call unlock_subsystem() to release the
+ mutex. */
+
+static void lock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only stops cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are stopped.
+*/
+static void stop_bus_for_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(2,("Handling: bus stop %s\n", subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We stop only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Stopping bus on core %s\n", core->description));
+ mali_core_renderunit_stop_bus(core);
+ core->error_recovery = MALI_TRUE;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4,("Core: not active %s\n", core->description ));
+ }
+ }
+ /* Mutex is still being held, to prevent things from happening while we do cleanup */
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only resets cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are reset.
+*/
+static void reset_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(3, ("Handling: reset cores from mmu: 0x%x on %s\n", mmu, subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We reset only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Abort and reset core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_WAIT, JOB_STATUS_END_SEG_FAULT);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Core: not active %s\n", core->description ));
+ }
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex. */
+static void continue_job_handling(struct mali_core_subsystem * subsys)
+{
+ u32 i, j;
+
+ MALI_DEBUG_PRINT(3, ("Handling: Continue: %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ core->error_recovery = MALI_FALSE;
+ }
+
+ i = subsys->number_of_cores;
+ j = subsys->awaiting_sessions_sum_all_priorities;
+
+ /* Schedule MIN(nr_waiting_jobs , number of cores) times */
+ while( i-- && j--)
+ {
+ mali_core_subsystem_schedule(subsys);
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* Unlock the subsystem. */
+static void unlock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data)
+{
+ void * mmu;
+ mmu = (void*) data;
+
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ break;
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ stop_bus_for_all_cores_on_mmu(subsys, mmu);
+ break;
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ reset_all_cores_on_mmu(subsys, mmu );
+ break;
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ continue_job_handling(subsys);
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+}
+#endif /* USING_MMU */
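+
+/* Illustrative sketch only: the five MMU_KILL_STEP* messages handled above are
+ * expected to be broadcast in numbered order, so the subsystem is locked first
+ * and unlocked last, matching the lock_subsystem()/unlock_subsystem()
+ * requirements documented above. example_mmu_kill_all_jobs is a hypothetical
+ * helper, not driver code; the broadcast entry point is declared in
+ * mali_kernel_subsystem.h.
+ *
+ *	static void example_mmu_kill_all_jobs(void *mmu)
+ *	{
+ *		_mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP0_LOCK_SUBSYSTEM, (u32)mmu);
+ *		_mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, (u32)mmu);
+ *		_mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, (u32)mmu);
+ *		_mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, (u32)mmu);
+ *		_mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+ *	}
+ */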
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs)
+{
+ if (watchdog_msecs == 0) job->watchdog_msecs = mali_max_job_runtime; /* use the default */
+ else if (watchdog_msecs > WATCHDOG_MSECS_MAX) job->watchdog_msecs = WATCHDOG_MSECS_MAX; /* no larger than max */
+ else if (watchdog_msecs < WATCHDOG_MSECS_MIN) job->watchdog_msecs = WATCHDOG_MSECS_MIN; /* not below min */
+ else job->watchdog_msecs = watchdog_msecs;
+}
+
+u32 mali_core_hang_check_timeout_get(void)
+{
+ /* check the value. The user might have set the value outside the allowed range */
+ if (mali_hang_check_interval > HANG_CHECK_MSECS_MAX) mali_hang_check_interval = HANG_CHECK_MSECS_MAX; /* cap to max */
+ else if (mali_hang_check_interval < HANG_CHECK_MSECS_MIN) mali_hang_check_interval = HANG_CHECK_MSECS_MIN; /* cap to min */
+
+ /* return the active value */
+ return mali_hang_check_interval;
+}
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data)
+{
+ mali_core_renderunit *core;
+ u32 has_pending_irq;
+
+ core = (mali_core_renderunit * )data;
+
+ if ( (NULL == core) ||
+ (NULL == core->subsystem) ||
+ (NULL == core->subsystem->irq_handler_upper_half) )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(core->subsystem);
+
+ has_pending_irq = core->subsystem->irq_handler_upper_half(core);
+
+ if ( has_pending_irq )
+ {
+ _mali_osk_irq_schedulework( core->irq ) ;
+ MALI_SUCCESS;
+ }
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+static void mali_core_irq_handler_bottom_half ( void *data )
+{
+ mali_core_renderunit *core;
+ mali_core_subsystem* subsystem;
+
+ mali_subsystem_job_end_code job_status;
+
+ core = (mali_core_renderunit * )data;
+
+ MALI_CHECK_CORE(core);
+ subsystem = core->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+ if ( CORE_IDLE == core->state ) goto end_function;
+
+ MALI_DEBUG_PRINT(5, ("IRQ: handling irq from core %s\n", core->description )) ;
+
+ _mali_osk_cache_flushall();
+
+ /* This function must also update the job status flag */
+ job_status = subsystem->irq_handler_bottom_half( core );
+
+ /* The job is finished unless the returned status is JOB_STATUS_CONTINUE_RUN. */
+ if ( JOB_STATUS_CONTINUE_RUN != job_status )
+ {
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, job_status);
+ }
+ else
+ {
+ switch ( core->state )
+ {
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_DEBUG_PRINT(2, ("Watchdog SW Timeout of job from core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_TIMEOUT_SW );
+ break;
+
+ case CORE_POLL:
+ MALI_DEBUG_PRINT(5, ("Poll core: %s\n", core->description )) ;
+ core->state = CORE_WORKING;
+ _mali_osk_timer_add( core->timer, 1);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(4, ("IRQ: The job on the core continues to run: %s\n", core->description )) ;
+ break;
+ }
+ }
+end_function:
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+}
+
+void subsystem_flush_mapped_mem_cache(void)
+{
+ _mali_osk_cache_flushall();
+ _mali_osk_mem_barrier();
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only)
+{
+ mali_core_renderunit * core = NULL;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* It is possible that this signal function can be called during a driver exit,
+ * and so the requested core may now be destroyed. (This is due to us not having
+ * the subsys lock before signalling power down).
+ * mali_core_renderunit_get_mali_core_nr() will report a Mali ERR because
+ * the core number is out of range (which is a valid error in other cases).
+ * So instead we check here (now that we have the subsys lock) and let the
+ * caller cope with the core get failure and check that the core has
+ * been unregistered in the PMM as part of its destruction.
+ */
+ if ( subsys->number_of_cores > mali_core_nr )
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+ }
+
+ if ( NULL == core )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: Failed to find core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if ( core->state != CORE_IDLE )
+ {
+ /* When powering down we either set a pending power down flag here so we
+ * can power down cleanly after the job completes or we don't set the
+ * flag if we have been asked to only do a power down right now
+ * In either case, return that the core is busy
+ */
+ if ( !immediate_only ) core->pend_power_down = MALI_TRUE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No idle core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down flag set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to off queue */
+ mali_core_subsystem_move_core_set_off(core);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only)
+{
+ mali_core_renderunit * core;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+
+ if( core == NULL )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: Failed to find core to power up\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if( core->state != CORE_OFF )
+ {
+ /* This will usually happen because we are trying to cancel a pending power down */
+ core->pend_power_down = MALI_FALSE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No powered off core to power up (cancelled power down?)\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to idle queue */
+ mali_core_subsystem_move_core_set_idle(core);
+
+ if( !queue_only )
+ {
+ /* Reset MMU & core - core must be idle to allow this */
+#if USING_MMU
+ if ( NULL!=core->mmu )
+ {
+#if defined(USING_MALI200)
+ if (core->pmm_id != MALI_PMM_CORE_PP0)
+ {
+#endif
+ mali_kernel_mmu_reset(core->mmu);
+#if defined(USING_MALI200)
+ }
+#endif
+
+ }
+#endif /* USING_MMU */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ /* Need to schedule work to start on this core */
+ mali_core_subsystem_schedule(subsys);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+#endif /* USING_MALI_PMM */
+
+#if MALI_STATE_TRACKING
+void mali_core_renderunit_dump_state(mali_core_subsystem* subsystem)
+{
+ u32 i;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp_core;
+
+ mali_core_session* session;
+ mali_core_session* tmp_session;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ MALI_PRINT(("Subsystem;\n"));
+ MALI_PRINT((" Name: %s\n", subsystem->name));
+
+ for (i = 0; i < subsystem->number_of_cores; i++)
+ {
+ MALI_PRINT((" Core: #%u\n", subsystem->mali_core_array[i]->core_number));
+ MALI_PRINT((" Description: %s\n", subsystem->mali_core_array[i]->description));
+ switch(subsystem->mali_core_array[i]->state)
+ {
+ case CORE_IDLE:
+ MALI_PRINT((" State: CORE_IDLE\n"));
+ break;
+ case CORE_WORKING:
+ MALI_PRINT((" State: CORE_WORKING\n"));
+ break;
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_PRINT((" State: CORE_WATCHDOG_TIMEOUT\n"));
+ break;
+ case CORE_POLL:
+ MALI_PRINT((" State: CORE_POLL\n"));
+ break;
+ case CORE_HANG_CHECK_TIMEOUT:
+ MALI_PRINT((" State: CORE_HANG_CHECK_TIMEOUT\n"));
+ break;
+ case CORE_OFF:
+ MALI_PRINT((" State: CORE_OFF\n"));
+ break;
+ default:
+ MALI_PRINT((" State: Unknown (0x%X)\n", subsystem->mali_core_array[i]->state));
+ break;
+ }
+ MALI_PRINT((" Current job: 0x%x\n", (u32)(subsystem->mali_core_array[i]->current_job)));
+ MALI_PRINT((" Core version: 0x%x\n", subsystem->mali_core_array[i]->core_version));
+#if USING_MALI_PMM
+ MALI_PRINT((" PMM id: 0x%x\n", subsystem->mali_core_array[i]->pmm_id));
+ MALI_PRINT((" Power down requested: %s\n", subsystem->mali_core_array[i]->pend_power_down ? "TRUE" : "FALSE"));
+#endif
+ }
+
+ MALI_PRINT((" Cores on idle list:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+ MALI_PRINT((" Core #%u\n", core->core_number));
+ }
+
+ MALI_PRINT((" Cores on off list:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_off_head, mali_core_renderunit, list)
+ {
+ MALI_PRINT((" Core #%u\n", core->core_number));
+ }
+
+ MALI_PRINT((" Connected sessions:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->all_sessions_head, mali_core_session, all_sessions_list)
+ {
+ MALI_PRINT((" Session 0x%X:\n", (u32)session));
+ MALI_PRINT((" Waiting job: 0x%X\n", (u32)session->job_waiting_to_run));
+ MALI_PRINT((" Notification queue: %s\n", _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY"));
+ }
+
+ MALI_PRINT((" Waiting sessions sum all priorities: %u\n", subsystem->awaiting_sessions_sum_all_priorities));
+ for (i = 0; i < PRIORITY_LEVELS; i++)
+ {
+ MALI_PRINT((" Waiting sessions with priority %u:\n", i));
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->awaiting_sessions_head[i], mali_core_session, awaiting_sessions_list)
+ {
+ MALI_PRINT((" Session 0x%X:\n", (u32)session));
+ MALI_PRINT((" Waiting job: 0x%X\n", (u32)session->job_waiting_to_run));
+ MALI_PRINT((" Notification queue: %s\n", _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY"));
+ }
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h
new file mode 100644
index 00000000000..94bce94d4e1
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_RENDERCORE_H__
+#define __MALI_RENDERCORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#define PRIORITY_LEVELS 3
+#define PRIORITY_MAX 0
+#define PRIORITY_MIN (PRIORITY_MAX+PRIORITY_LEVELS-1)
+
+/* This file contains what we need in kernel for all core types. */
+
+typedef enum
+{
+ CORE_IDLE, /**< Core is ready for a new job */
+ CORE_WORKING, /**< Core is working on a job */
+ CORE_WATCHDOG_TIMEOUT, /**< Core is working but it has timed out */
+ CORE_POLL, /**< Poll timer triggered, pending handling */
+ CORE_HANG_CHECK_TIMEOUT,/**< Timeout for hang detection */
+ CORE_OFF /**< Core is powered off */
+} mali_core_status;
+
+typedef enum
+{
+ SUBSYSTEM_RESCHEDULE,
+ SUBSYSTEM_WAIT
+} mali_subsystem_reschedule_option;
+
+typedef enum
+{
+ MALI_CORE_RESET_STYLE_RUNABLE,
+ MALI_CORE_RESET_STYLE_DISABLE,
+ MALI_CORE_RESET_STYLE_HARD
+} mali_core_reset_style;
+
+typedef enum
+{
+ JOB_STATUS_CONTINUE_RUN = 0x01,
+ JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ JOB_STATUS_END_OOM = 1<<(16+1),
+ JOB_STATUS_END_ABORT = 1<<(16+2),
+ JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ JOB_STATUS_END_HANG = 1<<(16+4),
+ JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} mali_subsystem_job_end_code;
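+
+/* A condensed mirror of the check done in mali_core_irq_handler_bottom_half()
+ * (not new logic): JOB_STATUS_CONTINUE_RUN is the only "still running" value,
+ * while every JOB_STATUS_END_* code lives in the upper 16 bits and marks the
+ * job as finished.
+ *
+ *	job_status = subsystem->irq_handler_bottom_half(core);
+ *	if (JOB_STATUS_CONTINUE_RUN != job_status)
+ *	{
+ *		mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, job_status);
+ *	}
+ */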
+
+
+struct mali_core_job;
+struct mali_core_subsystem;
+struct mali_core_renderunit;
+struct mali_core_session;
+
+/* We have one of these subsystems for each core type */
+typedef struct mali_core_subsystem
+{
+ struct mali_core_renderunit ** mali_core_array; /* An array of all cores of this type */
+ u32 number_of_cores; /* Number of cores in this list */
+
+ _mali_core_type core_type;
+
+ u32 magic_nr;
+
+ _mali_osk_list_t renderunit_idle_head; /* Idle cores of this type */
+ _mali_osk_list_t renderunit_off_head; /* Powered off cores of this type */
+
+ /* Linked list for each priority of sessions with a job ready for scheduling */
+ _mali_osk_list_t awaiting_sessions_head[PRIORITY_LEVELS];
+ u32 awaiting_sessions_sum_all_priorities;
+
+ /* Linked list of all sessions connected to this coretype */
+ _mali_osk_list_t all_sessions_head;
+
+ /* Notification queue for this core type */
+ struct _mali_osk_notification_queue_t * notification_queue;
+
+ const char * name;
+ mali_kernel_subsystem_identifier id;
+
+ /**** Functions registered for this core type. Set during mali_core_init ******/
+ /* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was started. */
+ _mali_osk_errcode_t (*start_job)(struct mali_core_job * job, struct mali_core_renderunit * core);
+
+ /* Check if given core has an interrupt pending. Return MALI_TRUE and set mask to 0 if pending */
+ u32 (*irq_handler_upper_half)(struct mali_core_renderunit * core);
+
+ /* This function should check if the interrupt indicates that the job was finished.
+ If so it should update the job struct, reset the core registers, and return the job end status.
+ If the job is still running after this function it should return JOB_STATUS_CONTINUE_RUN.
+ The function must also enable the bits in the interrupt mask for the core.
+ Called by the bottom half interrupt function. */
+ int (*irq_handler_bottom_half)(struct mali_core_renderunit* core);
+
+ /* This function is called from the ioctl function and should return a mali_core_job pointer
+ to a created mali_core_job object with the data given from userspace */
+ _mali_osk_errcode_t (*get_new_job_from_user)(struct mali_core_session * session, void * argument);
+
+ _mali_osk_errcode_t (*suspend_response)(struct mali_core_session * session, void * argument);
+
+ /* This function is called from the ioctl function and should write the necessary data
+ to userspace telling which job was finished and the status and debuginfo for this job.
+ The function must also free and cleanup the input job object. */
+ void (*return_job_to_user)(struct mali_core_job * job, mali_subsystem_job_end_code end_status);
+
+ /* Is called when a subsystem shuts down. This function needs to
+ release internal pointers in the core struct, and free the
+ core struct before returning.
+ It is not allowed to write to any registers, since the register
+ unmapping has already been done. */
+ void (*renderunit_delete)(struct mali_core_renderunit * core);
+
+ /* Is called when we want to abort a job that is running on the core.
+ This is done if the program exits while the core is running */
+ void (*reset_core)(struct mali_core_renderunit * core, mali_core_reset_style style);
+
+ /* Is called when the rendercore wants the core to give an interrupt */
+ void (*probe_core_irq_trigger)(struct mali_core_renderunit* core);
+
+ /* Is called when the irq probe wants the core to acknowledge an interrupt from the hw */
+ _mali_osk_errcode_t (*probe_core_irq_acknowledge)(struct mali_core_renderunit* core);
+
+ /* Called when the rendercore wants to issue a bus stop request to a core */
+ void (*stop_bus)(struct mali_core_renderunit* core);
+} mali_core_subsystem;
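+
+/* A minimal sketch of how a core type is expected to fill in this struct
+ * during its init; the example_* handler names are hypothetical placeholders,
+ * not functions provided by this driver.
+ *
+ *	static mali_core_subsystem example_subsystem =
+ *	{
+ *		.name                    = "ExampleCore",
+ *		.core_type               = _MALI_GP2,
+ *		.start_job               = example_start_job,
+ *		.irq_handler_upper_half  = example_irq_upper_half,
+ *		.irq_handler_bottom_half = example_irq_bottom_half,
+ *		.get_new_job_from_user   = example_get_new_job_from_user,
+ *		.return_job_to_user      = example_return_job_to_user,
+ *		.renderunit_delete       = example_renderunit_delete,
+ *		.reset_core              = example_reset_core,
+ *		.stop_bus                = example_stop_bus,
+ *	};
+ */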
+
+
+/* Per core data. This must be embedded into each core type internal core info. */
+typedef struct mali_core_renderunit
+{
+ struct mali_core_subsystem * subsystem; /* The core belongs to this subsystem */
+ _mali_osk_list_t list; /* Is always in subsystem->idle_list OR session->renderunits_working */
+ mali_core_status state;
+ mali_bool error_recovery; /* Indicates if the core is waiting for external help to recover (typically the MMU) */
+ mali_bool in_detach_function;
+ struct mali_core_job * current_job; /* Current job being processed on this core ||NULL */
+ u32 magic_nr;
+ _mali_osk_timer_t * timer;
+ _mali_osk_timer_t * timer_hang_detection;
+
+ mali_io_address registers_mapped; /* IO-mapped pointer to registers */
+ u32 registers_base_addr; /* Base address of the registers */
+ u32 size; /* The size of registers_mapped */
+ const char * description; /* Description of this core. */
+ u32 irq_nr; /* The IRQ nr for this core */
+ u32 core_version;
+#if USING_MMU
+ u32 mmu_id;
+ void * mmu; /* The MMU this rendercore is behind.*/
+#endif
+#if USING_MALI_PMM
+ mali_pmm_core_id pmm_id; /* The PMM core id */
+ mali_bool pend_power_down; /* Power down is requested */
+#endif
+
+ u32 core_number; /* 0 for first detected core of this type, 1 for second and so on */
+
+ _mali_osk_irq_t *irq;
+} mali_core_renderunit;
+
+
+/* Per open FILE data. */
+/* You must hold subsystem->mutex before any transactions on this datatype. */
+typedef struct mali_core_session
+{
+ struct mali_core_subsystem * subsystem; /* The session belongs to this subsystem */
+ _mali_osk_list_t renderunits_working_head; /* List of renderunits working for this session */
+ struct mali_core_job *job_waiting_to_run; /* The next job from this session to run */
+
+ _mali_osk_list_t awaiting_sessions_list; /* Linked list of sessions with jobs, for each priority */
+ _mali_osk_list_t all_sessions_list; /* Linked list of all sessions on the system. */
+
+ _mali_osk_notification_queue_t * notification_queue; /* Messages back to Base in userspace*/
+#if USING_MMU
+ struct mali_session_data * mmu_session; /* The session associated with the MMU page tables for this core */
+#endif
+ u32 magic_nr;
+} mali_core_session;
+
+
+/* This must be embedded into a specific mali_core_job struct */
+/* use this macro to get a specific mali_core_job: container_of(ptr, type, member)*/
+typedef struct mali_core_job
+{
+ _mali_osk_list_t list; /* Linked list of jobs. Used by struct mali_core_session */
+ struct mali_core_session *session;
+ u32 magic_nr;
+ u32 priority;
+ u32 watchdog_msecs;
+ u32 render_time_msecs ;
+ u32 start_time_jiffies;
+ unsigned long watchdog_jiffies;
+ u32 abort_id;
+} mali_core_job;
+
+/*
+ * The rendercore subsystem is included in the subsystems[] array.
+ */
+extern struct mali_kernel_subsystem mali_subsystem_rendercore;
+
+void subsystem_flush_mapped_mem_cache(void);
+
+
+#define SUBSYSTEM_MAGIC_NR 0xdeadbeef
+#define CORE_MAGIC_NR 0xcafebabe
+#define SESSION_MAGIC_NR 0xbabe1234
+#define JOB_MAGIC_NR 0x0123abcd
+
+
+#define MALI_CHECK_SUBSYSTEM(subsystem)\
+ do { \
+ if ( SUBSYSTEM_MAGIC_NR != subsystem->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0)
+
+#define MALI_CHECK_CORE(CORE)\
+ do { \
+ if ( CORE_MAGIC_NR != CORE->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_SESSION(SESSION)\
+ do { \
+ if ( SESSION_MAGIC_NR != SESSION->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_JOB(JOB)\
+ do { \
+ if ( JOB_MAGIC_NR != JOB->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+
+/* Check if job_a has higher priority than job_b */
+MALI_STATIC_INLINE int job_has_higher_priority(mali_core_job * job_a, mali_core_job * job_b)
+{
+ /* The lowest number has the highest priority */
+ return (int) (job_a->priority < job_b->priority);
+}
+
+MALI_STATIC_INLINE void job_priority_set(mali_core_job * job, u32 priority)
+{
+ if (priority > PRIORITY_MIN) job->priority = PRIORITY_MIN;
+ else job->priority = priority;
+}
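+
+/* With the defaults above (PRIORITY_LEVELS == 3) the valid priorities are
+ * 0 (PRIORITY_MAX, scheduled first) through 2 (PRIORITY_MIN, scheduled last),
+ * and out-of-range requests are clamped, e.g.:
+ *
+ *	job_priority_set(job, 7);	job->priority becomes PRIORITY_MIN (2)
+ *	job_priority_set(job, 0);	job->priority stays at PRIORITY_MAX (0)
+ */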
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs);
+
+/* For use by const default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value
+{
+ u32 address;
+ u32 value;
+} register_address_and_value ;
+
+
+/* For use by dynamic default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value_list
+{
+ _mali_osk_list_t list;
+ register_address_and_value item;
+} register_address_and_value_list ;
+
+/* Used if the user wants to set a contiguous block of registers */
+typedef struct register_array_user
+{
+ u32 entries_in_array;
+ u32 start_address;
+ void __user * reg_array;
+} register_array_user;
+
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRAB %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ _mali_osk_lock_wait( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRABBED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ rendercores_global_mutex_is_held = 1; \
+ rendercores_global_mutex_owner = _mali_osk_get_tid(); \
+ } while (0) ;
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASE %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ rendercores_global_mutex_is_held = 0; \
+ rendercores_global_mutex_owner = 0; \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ _mali_osk_lock_signal( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0) ;
+
+
+#define MALI_ASSERT_MUTEX_IS_GRABBED(input_pointer)\
+ do { \
+ if ( 0 == rendercores_global_mutex_is_held ) MALI_PRINT_ERROR(("ASSERT MUTEX SHOULD BE GRABBED"));\
+ if ( SUBSYSTEM_MAGIC_NR != input_pointer->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ if ( rendercores_global_mutex_owner != _mali_osk_get_tid() ) MALI_PRINT_ERROR(("Owner mismatch"));\
+ } while (0)
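+
+/* Canonical usage of the three macros above (this mirrors the pattern used
+ * throughout mali_kernel_rendercore.c, it is not new locking logic):
+ *
+ *	MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ *	...helpers called here may MALI_ASSERT_MUTEX_IS_GRABBED(subsystem)...
+ *	MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+ */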
+
+
+u32 mali_core_renderunit_register_read(struct mali_core_renderunit *core, u32 relative_address);
+void mali_core_renderunit_register_read_array(struct mali_core_renderunit *core, u32 relative_address, u32 * result_array, u32 nr_of_regs);
+void mali_core_renderunit_register_write(struct mali_core_renderunit *core, u32 relative_address, u32 new_val);
+void mali_core_renderunit_register_write_array(struct mali_core_renderunit *core, u32 relative_address, u32 * write_array, u32 nr_of_regs);
+
+_mali_osk_errcode_t mali_core_renderunit_init(struct mali_core_renderunit * core);
+void mali_core_renderunit_term(struct mali_core_renderunit * core);
+int mali_core_renderunit_map_registers(struct mali_core_renderunit *core);
+void mali_core_renderunit_unmap_registers(struct mali_core_renderunit *core);
+int mali_core_renderunit_irq_handler_add(struct mali_core_renderunit *core);
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr);
+
+int mali_core_subsystem_init(struct mali_core_subsystem * new_subsys);
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys);
+#endif
+int mali_core_subsystem_register_renderunit(struct mali_core_subsystem * subsys, struct mali_core_renderunit * core);
+int mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info);
+void mali_core_subsystem_cleanup(struct mali_core_subsystem * subsys);
+#if USING_MMU
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data);
+#endif
+void mali_core_session_begin(mali_core_session *session);
+void mali_core_session_close(mali_core_session * session);
+int mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return);
+u32 mali_core_hang_check_timeout_get(void);
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void* argument);
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id);
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only);
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only);
+#endif
+
+#if MALI_STATE_TRACKING
+void mali_core_renderunit_dump_state(mali_core_subsystem* subsystem);
+#endif
+
+#endif /* __MALI_RENDERCORE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h
new file mode 100644
index 00000000000..ce290d0c1c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SESSION_MANAGER_H__
+#define __MALI_KERNEL_SESSION_MANAGER_H__
+
+/* Incomplete struct to pass around pointers to it */
+struct mali_session_data;
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session, int id);
+
+#endif /* __MALI_KERNEL_SESSION_MANAGER_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h
new file mode 100644
index 00000000000..e8c6530668c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_subsystem.h
+ */
+
+#ifndef __MALI_KERNEL_SUBSYSTEM_H__
+#define __MALI_KERNEL_SUBSYSTEM_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+
+/* typedefs of the datatypes used in the hook functions */
+typedef void * mali_kernel_subsystem_session_slot;
+typedef int mali_kernel_subsystem_identifier;
+typedef _mali_osk_errcode_t (*mali_kernel_resource_registrator)(_mali_osk_resource_t *);
+
+/**
+ * Broadcast notification messages
+ */
+typedef enum mali_core_notification_message
+{
+ MMU_KILL_STEP0_LOCK_SUBSYSTEM, /**< Request to lock subsystem */
+ MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, /**< Request to stop all buses */
+ MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, /**< Request to kill all jobs, and to not start new jobs */
+ MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, /**< Request to continue with new jobs on all cores */
+ MMU_KILL_STEP4_UNLOCK_SUBSYSTEM /**< Request to unlock subsystem */
+} mali_core_notification_message;
+
+/**
+ * A function pointer can be NULL if the subsystem isn't interested in the event.
+ */
+typedef struct mali_kernel_subsystem
+{
+ /* subsystem control */
+ _mali_osk_errcode_t (*startup)(mali_kernel_subsystem_identifier id); /**< Called during module load or system startup*/
+ void (*shutdown)(mali_kernel_subsystem_identifier id); /**< Called during module unload or system shutdown */
+
+ /**
+ * Called during module load or system startup.
+ * Called when all subsystems have reported startup OK and all resources were successfully initialized
+ */
+ _mali_osk_errcode_t (*load_complete)(mali_kernel_subsystem_identifier id);
+
+ /* per subsystem handlers */
+ _mali_osk_errcode_t (*system_info_fill)(_mali_system_info* info); /**< Fill info into info struct. MUST allocate memory with kmalloc, since it's kfree'd */
+
+ /* per session handlers */
+ /**
+ * Informs about a new session.
+ * slot can be used to track per-session per-subsystem data.
+ * queue can be used to send events to user space.
+ * Returns a _mali_osk_errcode_t error code.
+ */
+ _mali_osk_errcode_t (*session_begin)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+ /**
+ * Informs that a session is ending
+ * slot was the same as given during session_begin
+ */
+ void (*session_end)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+ /* Used by subsystems to send messages to each other. This is the receiving end */
+ void (*broadcast_notification)(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+ /** Dump the current state of the subsystem */
+ void (*dump_state)(void);
+#endif
+} mali_kernel_subsystem;
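+
+/* A minimal sketch of how a subsystem is expected to fill in these hooks;
+ * the example_* names are hypothetical placeholders, not functions provided
+ * by this driver, and any hook a subsystem does not need may be left NULL.
+ *
+ *	struct mali_kernel_subsystem example_subsystem =
+ *	{
+ *		.startup                = example_startup,
+ *		.shutdown               = example_shutdown,
+ *		.session_begin          = example_session_begin,
+ *		.session_end            = example_session_end,
+ *		.broadcast_notification = NULL,
+ *	};
+ */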
+
+/* functions used by the subsystems to interact with the core */
+/**
+ * Register a resource handler
+ * @param type The resource type to register a handler for
+ * @param handler Pointer to the function handling this resource
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler);
+
+/* function used to interact with other subsystems */
+/**
+ * Broadcast a message
+ * Sends a message to all subsystems which have registered a broadcast notification handler
+ * @param message The message to send
+ * @param data Message specific extra data
+ */
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+/**
+ * Tell all subsystems to dump their current state
+ */
+void _mali_kernel_core_dump_state(void);
+#endif
+
+
+#endif /* __MALI_KERNEL_SUBSYSTEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c
new file mode 100644
index 00000000000..04653b06ad5
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+#define MALI_GPU_UTILIZATION_TIMEOUT 500
+
+static _mali_osk_lock_t *time_data_lock;
+
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;
+static u64 work_start_time = 0;
+static u64 accumulated_work_time = 0;
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+
+static void calculate_gpu_utilization(void* arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized;
+ u32 period_normalized;
+ u32 utilization;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (accumulated_work_time == 0 && work_start_time == 0)
+ {
+ /* Don't reschedule timer, this will be started if new work arrives */
+ timer_running = MALI_FALSE;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* No work done for this period, report zero usage */
+ mali_gpu_utilization_handler(0);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time != 0)
+ {
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = time_now;
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid depending on a 64-bit divider, we first shift the two values
+ * down equally.
+ * We then shift the dividend up or the divisor down, so the result is expressed in parts of 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized = (u32)(accumulated_work_time >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now, we should report the usage in parts of 256
+ * this means we must shift up the dividend or down the divisor by 8
+ * (we could do a combination, but we just use one for simplicity,
+ * but the end result should be good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF)
+ {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ }
+ else
+ {
+ /*
+ * The divisor is so small that we can shift the dividend up without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized <<= 8;
+ }
+
+ utilization = work_normalized / period_normalized;
+
+ accumulated_work_time = 0;
+ period_start_time = time_now; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+
+ mali_gpu_utilization_handler(utilization);
+}
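+
+/* Worked example of the scaling above, with assumed bookkeeping values:
+ * a 500 ms period (time_period = 500000000 ns) in which the GPU was busy for
+ * 375 ms (accumulated_work_time = 375000000 ns). Both already fit in 32 bits,
+ * so assume the initial equal shift leaves them unchanged; the divisor is
+ * larger than 0x00FFFFFF, so it is shifted down by 8 to 1953125, and
+ *
+ *	utilization = 375000000 / 1953125 = 192
+ *
+ * i.e. 192/256 = 75% busy is what mali_gpu_utilization_handler() receives.
+ */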
+
+
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+ time_data_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ|_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == time_data_lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_atomic_init(&num_running_cores, 0);
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer)
+ {
+ _mali_osk_lock_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_atomic_term(&num_running_cores);
+
+ _mali_osk_lock_term(time_data_lock);
+}
+
+
+
+void mali_utilization_core_start(void)
+{
+ if (_mali_osk_atomic_inc_return(&num_running_cores) == 1)
+ {
+ /*
+ * We went from zero cores working to one core working,
+ * so we now consider the entire GPU to be busy.
+ */
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ work_start_time = _mali_osk_time_get_ns();
+
+ if (timer_running != MALI_TRUE)
+ {
+ timer_running = MALI_TRUE;
+ period_start_time = work_start_time; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+ }
+ else
+ {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+}
+
+
+
+void mali_utilization_core_end(void)
+{
+ if (_mali_osk_atomic_dec_return(&num_running_cores) == 0)
+ {
+ /*
+ * No more cores are working, so accumulate the time we were busy.
+ */
+ u64 time_now;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ time_now = _mali_osk_time_get_ns();
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = 0;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h
new file mode 100644
index 00000000000..f6485006729
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Should be called when a core is about to start executing a job
+ */
+void mali_utilization_core_start(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has finished executing a job
+ */
+void mali_utilization_core_end(void);
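+
+/*
+ * Typical call sequence (editorial sketch; the exact integration points depend
+ * on the rest of the driver and are not dictated by this header):
+ *
+ *   mali_utilization_init();         // at driver load
+ *   ...
+ *   mali_utilization_core_start();   // a core begins executing a job
+ *   ...                              // job runs on the GPU
+ *   mali_utilization_core_end();     // the core finishes the job
+ *   ...
+ *   mali_utilization_suspend();      // on system suspend, stop the timer
+ *   ...
+ *   mali_utilization_term();         // at driver unload
+ */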
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c
new file mode 100644
index 00000000000..8dfa3a393ba
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+/*#include "mali_timestamp.h"
+*/
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+/* u64 ts = _mali_timestamp_get();
+ */
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h
new file mode 100644
index 00000000000..c35d90577a3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface accept certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_irq_schedulework(). Refer to \ref _mali_osk_irq_schedulework() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @brief IRQ 'bottom-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the deferred handling
+ * of a resource's IRQ. Usually, this work cannot be carried out in IRQ context
+ * by the IRQ upper-half handler.
+ *
+ * The IRQ bottom-half handler maps on to the concept of an IST that may
+ * execute some time after the actual IRQ has fired.
+ *
+ * All OSK-registered IRQ bottom-half handlers will be serialized, across all
+ * CPU-cores in the system.
+ *
+ * Refer to \ref _mali_osk_irq_schedulework() for more information on the
+ * IRQ work-queue, and the calling of the IRQ bottom-half handler.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_irq_bhandler_t)( void * arg );
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Even though the structure has space for a u32, the counters will only
+ * represent signed 24-bit integers.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block while waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are, by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for an interruptable spinlock (the flag simply makes the non-interruptable
+ * nature explicit). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any are present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The CPU Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_irq_schedulework() to make use of the IRQ bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, provided that an early out was not taken in the
+ * loop body, it is guaranteed that &ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
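+
+/*
+ * Usage sketch (editorial example; 'my_job', its 'link' member and 'job_list',
+ * a previously initialised _mali_osk_list_t list head, are hypothetical and
+ * not part of this interface):
+ *
+ *   struct my_job
+ *   {
+ *       u32 id;
+ *       _mali_osk_list_t link;   // links my_job objects into a list
+ *   };
+ *
+ *   struct my_job *job, *tmp;
+ *   _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_list, struct my_job, link)
+ *   {
+ *       // 'job' is recovered from its 'link' member via _MALI_OSK_LIST_ENTRY;
+ *       // it is safe to remove 'job' from the list inside this body.
+ *   }
+ */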
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief The known resource types
+ *
+ * @note \b IMPORTANT: these must remain fixed, and only be extended. This is
+ * because not all systems use a header file for reading in their resources.
+ * The resources may instead come from a data file where these resources are
+ * 'hard-coded' in, because there's no easy way of transferring the enum values
+ * into such data files. E.g. the C-Pre-processor does \em not process enums.
+ */
+typedef enum _mali_osk_resource_type
+{
+ RESOURCE_TYPE_FIRST =0, /**< Duplicate resource marker for the first resource*/
+ MEMORY =0, /**< Physically contiguous memory block, not managed by the OS */
+ OS_MEMORY =1, /**< Memory managed by and shared with the OS */
+ MALI200 =3, /**< Mali200 Programmable Fragment Shader */
+ MALIGP2 =4, /**< MaliGP2 Programmable Vertex Shader */
+ MMU =5, /**< Mali MMU (Memory Management Unit) */
+ FPGA_FRAMEWORK =6, /**< Mali registers specific to FPGA implementations */
+ MALI400L2 =7, /**< Mali400 L2 Cache */
+ MALI300L2 =7, /**< Mali300 L2 Cache */
+ MALI400GP =8, /**< Mali400 Programmable Vertex Shader Core */
+ MALI300GP =8, /**< Mali300 Programmable Vertex Shader Core */
+ MALI400PP =9, /**< Mali400 Programmable Fragment Shader Core */
+ MALI300PP =9, /**< Mali300 Programmable Fragment Shader Core */
+ MEM_VALIDATION =10, /**< External Memory Validator */
+ PMU =11, /**< Power Management Unit */
+ RESOURCE_TYPE_COUNT /**< The total number of known resources */
+} _mali_osk_resource_type_t;
+
+/** @brief resource description struct
+ *
+ * _mali_osk_resources_init() will enumerate objects of this type. Not all
+ * members have a valid meaning across all types.
+ *
+ * The mmu_id is used to group resources to a certain MMU, since there may be
+ * more than one MMU in the system, and each resource may be using a different
+ * MMU:
+ * - For MMU resources, the setting of mmu_id is a uniquely identifying number.
+ * - For Other resources, the setting of mmu_id determines which MMU the
+ * resource uses.
+ */
+typedef struct _mali_osk_resource
+{
+ _mali_osk_resource_type_t type; /**< type of the resource */
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ s32 cpu_usage_adjust; /**< Offset added to the base address of the resource to arrive at the CPU physical address of the resource (if different from the Mali physical address) */
+ u32 size; /**< Size in bytes of the resource - either the size of its register range, or the size of the memory block. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+ u32 flags; /**< Resources-specific flags. */
+ u32 mmu_id; /**< Identifier for Mali MMU resources. */
+ u32 alloc_order; /**< Order in which MEMORY/OS_MEMORY resources are used */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Fake IRQ number for testing purposes
+ */
+#define _MALI_OSK_IRQ_NUMBER_FAKE ((u32)0xFFFFFFF1)
+
+
+/** @brief PMM Virtual IRQ number
+ */
+#define _MALI_OSK_IRQ_NUMBER_PMM ((u32)0xFFFFFFF2)
+
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * The _mali_osk_irq_t returned must be written into the resource-specific data
+ * pointed to by data. This is so that the upper and lower handlers can call
+ * _mali_osk_irq_schedulework().
+ *
+ * @note The caller must ensure that the resource does not generate an
+ * interrupt after _mali_osk_irq_init() finishes, and before the
+ * _mali_osk_irq_t is written into the resource-specific data. Otherwise,
+ * the upper-half handler will fail to call _mali_osk_irq_schedulework().
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and trigger_func and ack_func must be
+ * non-NULL.
+ * @param uhandler The upper-half handler, corresponding to an ISR handler for
+ * the resource
+ * @param bhandler The lower-half handler, corresponding to an IST handler for
+ * the resource
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param data resource-specific data, which will be passed to uhandler,
+ * bhandler and (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description );
+
+/** @brief Cause a queued, deferred call of the IRQ bottom-half.
+ *
+ * _mali_osk_irq_schedulework provides a mechanism for enqueuing deferred calls
+ * to the IRQ bottom-half handler. The queue is known as the IRQ work-queue.
+ * After calling _mali_osk_irq_schedulework(), the IRQ bottom-half handler will
+ * be scheduled to run at some point in the future.
+ *
+ * This is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_irq_schedulework() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_irq_schedulework() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the IRQ bottom-half handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_irq_schedulework()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The IRQ bottom-half callback (of type _mali_osk_irq_bhandler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_irq_schedulework() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_irq_schedulework() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * IRQs will be lost.
+ *
+ * The queue that _mali_osk_irq_schedulework implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_irq_schedulework
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered IRQs.
+ *
+ * The consequence of the queue being a 1-reader type is that calling
+ * _mali_osk_irq_schedulework() on different _mali_osk_irq_t objects causes
+ * their IRQ bottom-halves to be serialized, across all CPU-cores in the
+ * system.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ bottom-half must begin processing.
+ */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for the
+ * IRQ work-queue to finish the work that is currently in the queue. That is,
+ * for every deferred call currently in the IRQ work-queue, it waits for each
+ * of those to be processed by their respective IRQ bottom-half handler.
+ *
+ * This function is used to ensure that the bottom-half handler of the supplied
+ * IRQ object will not be running at the completion of this function call.
+ * However, the caller must ensure that no other sources could call the
+ * _mali_osk_irq_schedulework() on the same IRQ object. For example, the
+ * relevant timers must be stopped.
+ *
+ * @note While this function is being called, other OSK-registered IRQs in the
+ * system may enqueue work for their respective bottom-half handlers. This
+ * function will not wait for those entries in the work-queue to be flushed.
+ *
+ * Since this blocks on the completion of work in the IRQ work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any OSK-registered IRQ bottom-half handler. To do so may cause a deadlock.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * The counters have storage for signed 24-bit integers. Initializing to signed
+ * values requiring more than 24 bits of storage will fail.
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
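+
+/*
+ * Usage sketch (editorial example mirroring the pattern used by the
+ * utilization code; 'refcount' and the setup/teardown steps are hypothetical):
+ *
+ *   static _mali_osk_atomic_t refcount;
+ *
+ *   if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&refcount, 0)) return;
+ *
+ *   if (_mali_osk_atomic_inc_return(&refcount) == 1)
+ *   {
+ *       // first user: perform one-time setup
+ *   }
+ *   ...
+ *   if (_mali_osk_atomic_dec_return(&refcount) == 0)
+ *   {
+ *       // last user: perform teardown
+ *   }
+ *
+ *   _mali_osk_atomic_term(&refcount);
+ */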
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
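+
+/*
+ * Usage sketch (editorial example; 'my_record' is hypothetical):
+ *
+ *   struct my_record *rec = _mali_osk_calloc(1, sizeof(*rec));
+ *   if (NULL == rec)
+ *   {
+ *       return _MALI_OSK_ERR_NOMEM;   // allocation failed
+ *   }
+ *   // 'rec' is zero-initialized and suitably aligned for any type
+ *   ...
+ *   _mali_osk_free(rec);              // must be freed exactly once
+ */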
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * avoid triggering the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+ *
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
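+
+/*
+ * Usage sketch (editorial example mirroring how the utilization code protects
+ * its timing data; error handling abbreviated):
+ *
+ *   _mali_osk_lock_t *lock;
+ *
+ *   lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |
+ *                              _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ *   if (NULL == lock) return _MALI_OSK_ERR_FAULT;
+ *
+ *   _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);    // obtain
+ *   // ... touch the protected data ...
+ *   _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);  // release
+ *
+ *   _mali_osk_lock_term(lock);   // when the lock is no longer needed
+ */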
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which affects memory
+ * mapped by _mali_osk_mem_mapregion. It will not be needed for memory
+ * mapped through _mali_osk_mem_mapioregion.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
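+
+/*
+ * Usage sketch (editorial example; 'resource' is assumed to point at a
+ * _mali_osk_resource_t describing a register bank, and 'MY_REG_OFFSET' is a
+ * hypothetical 4-byte-aligned register offset):
+ *
+ *   mali_io_address regs;
+ *   u32 val;
+ *
+ *   regs = _mali_osk_mem_mapioregion(resource->base, resource->size,
+ *                                    resource->description);
+ *   if (NULL == regs) return _MALI_OSK_ERR_FAULT;
+ *
+ *   val = _mali_osk_mem_ioread32(regs, MY_REG_OFFSET);       // read register
+ *   _mali_osk_mem_iowrite32(regs, MY_REG_OFFSET, val | 0x1); // set bit 0
+ *
+ *   _mali_osk_mem_unmapioregion(resource->base, resource->size, regs);
+ */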
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSs do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications that the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after jobs have run on the hardware, or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue, which can be used to queue messages for
+ * delivery to user space, and to retrieve queued messages from.
+ *
+ * The queue is a FIFO, and has no restrictions on the number of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+#if MALI_STATE_TRACKING
+/** @brief Check whether a notification queue is empty
+ *
+ * Check if a notification queue is empty.
+ *
+ * @param queue The queue to check.
+ * @return MALI_TRUE if queue is empty, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue );
+#endif
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the function returns an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
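+
+/* Illustrative only: a minimal sketch of the intended create/send/receive/delete
+ * flow. The notification type EXAMPLE_NOTIFICATION_TYPE and the payload struct
+ * example_payload_t are hypothetical and not part of this API:
+ *
+ * \code
+ * typedef struct { u32 job_id; u32 status; } example_payload_t;
+ *
+ * static _mali_osk_errcode_t example_send( _mali_osk_notification_queue_t *queue, u32 job_id, u32 status )
+ * {
+ *     _mali_osk_notification_t *n;
+ *     example_payload_t *payload;
+ *
+ *     n = _mali_osk_notification_create( EXAMPLE_NOTIFICATION_TYPE, sizeof(example_payload_t) );
+ *     if ( NULL == n ) return _MALI_OSK_ERR_NOMEM;
+ *
+ *     payload = (example_payload_t *)n->result_buffer;
+ *     payload->job_id = job_id;
+ *     payload->status = status;
+ *
+ *     _mali_osk_notification_queue_send( queue, n );
+ *     return _MALI_OSK_ERR_OK;
+ * }
+ *
+ * static void example_receive( _mali_osk_notification_queue_t *queue )
+ * {
+ *     _mali_osk_notification_t *n = NULL;
+ *
+ *     if ( _MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive( queue, &n ) && NULL != n )
+ *     {
+ *         // ...inspect n->result_buffer here...
+ *         _mali_osk_notification_delete( n ); // never _mali_osk_free()
+ *     }
+ * }
+ * \endcode
+ */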
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the absolute time at which a timer will expire, and start it if it is
+ * stopped. If \a expiry_tick is in the past (determined by
+ * _mali_osk_time_after() ), the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at absolute time \a expiry_tick, at which point, the
+ * callback function will be invoked with the callback-specific data, as set
+ * by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param expiry_tick the \em absolute time in ticks at which this timer should
+ * trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the IRQ work-queue by the timer (with
+ * \ref _mali_osk_irq_schedulework()) may still run. The timer callback and IRQ
+ * bottom-half handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
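+
+/* Illustrative only: a minimal sketch of the intended timer lifecycle. It
+ * assumes that _mali_osk_timer_callback_t takes a single void* argument (check
+ * the typedef in this header) and uses a hypothetical watchdog callback:
+ *
+ * \code
+ * static void example_watchdog_callback( void *data )
+ * {
+ *     // Runs in timer context: must not take mutexes held by code that may
+ *     // call _mali_osk_timer_del(), or a deadlock can occur.
+ * }
+ *
+ * static _mali_osk_timer_t *example_timer_start( void *data, u32 timeout_ms )
+ * {
+ *     _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ *     if ( NULL == tim ) return NULL;
+ *
+ *     _mali_osk_timer_setcallback( tim, example_watchdog_callback, data );
+ *     _mali_osk_timer_add( tim, _mali_osk_time_mstoticks( timeout_ms ) );
+ *     return tim;
+ * }
+ *
+ * static void example_timer_stop( _mali_osk_timer_t *tim )
+ * {
+ *     _mali_osk_timer_del( tim );  // blocks until the callback is not running
+ *     _mali_osk_timer_term( tim ); // only legal on a stopped timer
+ * }
+ * \endcode
+ */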
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time uses the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be lossless,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
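+
+/* Illustrative only: a minimal sketch of a timeout loop built from the tick
+ * helpers above. The polled condition (example_condition_met()) is hypothetical:
+ *
+ * \code
+ * static mali_bool example_wait_with_timeout( u32 timeout_ms )
+ * {
+ *     u32 deadline = _mali_osk_time_tickcount() + _mali_osk_time_mstoticks( timeout_ms );
+ *
+ *     while ( !example_condition_met() )
+ *     {
+ *         if ( _mali_osk_time_after( _mali_osk_time_tickcount(), deadline ) )
+ *         {
+ *             return MALI_FALSE; // timed out
+ *         }
+ *     }
+ *     return MALI_TRUE;
+ * }
+ * \endcode
+ *
+ * The deadline is computed by adding a tick delta to an absolute tick count
+ * (which the group documentation above describes as meaningful), and the
+ * comparison uses _mali_osk_time_after() so that tick-counter rollover is
+ * handled by the OS implementation.
+ */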
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay has microsecond resolution, and is necessary for correct
+ * operation of the driver. The delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) longer.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, it must be implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return the time in nanoseconds, since an arbitrary reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
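+
+/* Illustrative only: one possible portable implementation (a binary search over
+ * the word), shown as a sketch. An OS port may instead map this onto a native
+ * instruction or compiler builtin where one is available. It returns 32 for an
+ * input of zero, which the reference _mali_osk_find_first_zero_bit sketch in
+ * mali_osk_bitops.h appears to rely on:
+ *
+ * \code
+ * u32 _mali_osk_clz( u32 val )
+ * {
+ *     u32 n = 32;
+ *     u32 y;
+ *
+ *     y = val >> 16; if ( y ) { n -= 16; val = y; }
+ *     y = val >> 8;  if ( y ) { n -= 8;  val = y; }
+ *     y = val >> 4;  if ( y ) { n -= 4;  val = y; }
+ *     y = val >> 2;  if ( y ) { n -= 2;  val = y; }
+ *     y = val >> 1;  if ( y ) return n - 2;
+ *     return n - val;
+ * }
+ * \endcode
+ */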
+/** @} */ /* end group _mali_osk_math */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h
new file mode 100644
index 00000000000..28026ba1e6d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+ /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+ /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz( isolated );
+
+ return 31 - leading_zeros;
+}
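+
+/* Worked example (illustrative): for value = 0xFFFFFFF7 (first zero at bit 3):
+ *   inverted = ~value             = 0x00000008
+ *   negated  = (u32)-inverted     = 0xFFFFFFF8
+ *   isolated = negated & inverted = 0x00000008
+ *   _mali_osk_clz( 0x00000008 )   = 28, so the function returns 31 - 28 = 3.
+ * For value = 0xFFFFFFFF (no zero bit), isolated is 0, _mali_osk_clz() returns
+ * 32, and the function returns -1, which callers treat as 'not found'. */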
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ return _mali_internal_test_bit( nr, *addr );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+ u32 total;
+
+ for ( total = 0; total < maxbit; total += 32, ++addr )
+ {
+ int result;
+ result = _mali_internal_find_first_zero_bit( *addr );
+
+ /* non-negative signifies the bit was found */
+ if ( result >= 0 )
+ {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if ( total >= maxbit )
+ {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
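+
+/* Illustrative only: a minimal bitmap-allocator sketch using the operations
+ * above. EXAMPLE_MAX_IDS and the storage array are hypothetical; note that
+ * these operations are non-atomic, so a real user would hold a lock around the
+ * find-and-set sequence:
+ *
+ * \code
+ * #define EXAMPLE_MAX_IDS 64
+ * static u32 example_id_map[EXAMPLE_MAX_IDS / 32]; // zero-initialized: all IDs free
+ *
+ * static u32 example_alloc_id( void )
+ * {
+ *     u32 id = _mali_osk_find_first_zero_bit( example_id_map, EXAMPLE_MAX_IDS );
+ *     if ( id < EXAMPLE_MAX_IDS )
+ *     {
+ *         _mali_osk_set_nonatomic_bit( id, example_id_map );
+ *     }
+ *     return id; // EXAMPLE_MAX_IDS means 'no free ID'
+ * }
+ *
+ * static void example_free_id( u32 id )
+ * {
+ *     _mali_osk_clear_nonatomic_bit( id, example_id_map );
+ * }
+ * \endcode
+ */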
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h
new file mode 100644
index 00000000000..a8d15f2a107
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one whose head element points back to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE int _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Join two lists
+ *
+ * Both lists must be initialized.
+ *
+ * Inserts all elements of \a list into the destination list, immediately after
+ * the entry \a at.
+ *
+ * @param list the new list to add
+ * @param at the location in a list to add the new list into
+ */
+MALI_STATIC_INLINE void _mali_osk_list_splice( _mali_osk_list_t *list, _mali_osk_list_t *at )
+{
+ if (!_mali_osk_list_empty(list))
+ {
+ /* insert all items from 'list' after 'at' */
+ _mali_osk_list_t *first = list->next;
+ _mali_osk_list_t *last = list->prev;
+ _mali_osk_list_t *split = at->next;
+
+ first->prev = at;
+ at->next = first;
+
+ last->next = split;
+ split->prev = last;
+ }
+}
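+
+/* Illustrative only: a minimal sketch of embedding a list element in a private
+ * structure and walking the list via the next pointers. example_job_t and the
+ * container-of arithmetic (offsetof() from <stddef.h>) are hypothetical; real
+ * users typically wrap the pointer arithmetic in a macro:
+ *
+ * \code
+ * typedef struct example_job
+ * {
+ *     u32 id;
+ *     _mali_osk_list_t list; // linkage into a job queue
+ * } example_job_t;
+ *
+ * static void example_queue_job( _mali_osk_list_t *queue, example_job_t *job )
+ * {
+ *     _mali_osk_list_init( &job->list );
+ *     _mali_osk_list_addtail( &job->list, queue ); // FIFO order
+ * }
+ *
+ * static example_job_t *example_dequeue_job( _mali_osk_list_t *queue )
+ * {
+ *     _mali_osk_list_t *entry;
+ *     if ( _mali_osk_list_empty( queue ) ) return NULL;
+ *
+ *     entry = queue->next; // oldest entry
+ *     _mali_osk_list_delinit( entry );
+ *     return (example_job_t *)( (char *)entry - offsetof(example_job_t, list) );
+ * }
+ * \endcode
+ */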
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h
new file mode 100644
index 00000000000..9a046fb91eb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Initialize the OSK layer
+ *
+ * This function is used to setup any initialization of OSK functionality, if
+ * required.
+ *
+ * This must be the first function called from the common code, specifically,
+ * from the common code entry-point, mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_constructor when the device driver is loaded.
+ *
+ * @return _MALI_OSK_ERR_OK on success, or a suitable _mali_osk_errcode_t on
+ * failure.
+ */
+_mali_osk_errcode_t _mali_osk_init( void );
+
+/** @brief Terminate the OSK layer
+ *
+ * This function is used to terminate any resources initialized by
+ * _mali_osk_init.
+ *
+ * This must be the last function called from the common code, specifically,
+ * from the common code closedown function, mali_kernel_destructor, and the
+ * error path in mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_destructor when the device driver is terminated.
+ */
+void _mali_osk_term( void );
+
+/** @brief Read the Mali Resource configuration
+ *
+ * Populates a _mali_osk_resource_t array from configuration settings, which
+ * are stored in an OS-specific way.
+ *
+ * For example, these may be compiled in to a static structure, or read from
+ * the filesystem at startup.
+ *
+ * On failure, do not call _mali_osk_resources_term.
+ *
+ * @param arch_config a pointer to storage for the pointer to the resources
+ * @param num_resources the number of resources read
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_NOMEM on allocation
+ * error. For other failures, a suitable _mali_osk_errcode_t is returned.
+ */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources );
+
+/** @brief Free resources allocated by _mali_osk_resources_init.
+ *
+ * Frees the _mali_osk_resource_t array allocated by _mali_osk_resources_init
+ *
+ * @param arch_config a pointer to the stored pointer to the resources
+ * @param num_resources the number of resources in the array
+ */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources);
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
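+
+/* Illustrative only: the call sequence expected from the common code, shown as
+ * a sketch. The descriptor is assumed to already have
+ * MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE set in its flags, as all of
+ * these functions require:
+ *
+ * \code
+ * static _mali_osk_errcode_t example_map_one_os_page( mali_memory_allocation *descriptor )
+ * {
+ *     u32 phys = MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC;
+ *     _mali_osk_errcode_t err;
+ *
+ *     err = _mali_osk_mem_mapregion_init( descriptor );
+ *     if ( _MALI_OSK_ERR_OK != err ) return err;
+ *
+ *     // Ask the OS to allocate the page itself; phys returns the real address
+ *     err = _mali_osk_mem_mapregion_map( descriptor, 0, &phys, _MALI_OSK_CPU_PAGE_SIZE );
+ *     if ( _MALI_OSK_ERR_OK != err )
+ *     {
+ *         _mali_osk_mem_mapregion_term( descriptor );
+ *         return err;
+ *     }
+ *
+ *     // ...use the mapping...
+ *
+ *     _mali_osk_mem_mapregion_unmap( descriptor, 0, _MALI_OSK_CPU_PAGE_SIZE,
+ *                                    _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ *     _mali_osk_mem_mapregion_term( descriptor );
+ *     return _MALI_OSK_ERR_OK;
+ * }
+ * \endcode
+ */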
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h
new file mode 100644
index 00000000000..9a1583658c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/*
+ * NOTE: Because this file can be included from user-side and kernel-side,
+ * it is up to the includee to ensure certain typedefs (e.g. u32) are already
+ * defined when #including this.
+ */
+#include "regs/mali_200_regs.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_GET_SYSTEM_INFO_SIZE, /**< _mali_ukk_get_system_info_size() */
+ _MALI_UK_GET_SYSTEM_INFO, /**< _mali_ukk_get_system_info() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_ABORT_JOB, /**< Abort a job */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_PP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_pp_abort_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_gp_abort_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+
+#if USING_MALI_PMM
+ /** Power Management Module Functions */
+ _MALI_UK_PMM_EVENT_MESSAGE = 0, /**< Raise an event message */
+#endif
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
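+
+/* Illustrative only: the (subsystem, function) ordered pair is what identifies
+ * a U/K call; how the pair is packed into a single number is up to the U/K
+ * implementation and is not defined here. A purely hypothetical packing might
+ * look like this:
+ *
+ * \code
+ * // Hypothetical: pack the ordered pair into one u32 identifier.
+ * #define EXAMPLE_UK_CALL_ID(subsystem, function) \
+ *     ( ((u32)(subsystem) << 16) | (u32)(function) )
+ *
+ * // e.g. EXAMPLE_UK_CALL_ID(_MALI_UK_GP_SUBSYSTEM, _MALI_UK_GP_START_JOB)
+ * // differs from EXAMPLE_UK_CALL_ID(_MALI_UK_PP_SUBSYSTEM, _MALI_UK_PP_START_JOB),
+ * // even though both function values equal _MALI_UK_START_JOB.
+ * \endcode
+ */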
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this IO command until a job has completed, failed, or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+/** @brief Information about each Mali Core
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Both Fragment Processor (PP) and Vertex Processor (GP) cores are represented
+ * by this struct.
+ *
+ * The type is reported by the _mali_core_info::type field.
+ *
+ * Each core is given a unique Sequence number identifying it, the core_nr
+ * member.
+ *
+ * Flags are taken directly from the resource's flags, and are currently unused.
+ *
+ * Multiple mali_core_info structs are linked in a single linked list using the next field
+ */
+typedef struct _mali_core_info
+{
+ _mali_core_type type; /**< Type of core */
+ _mali_core_version version; /**< Core Version, as reported by the Core's Version Register */
+ u32 reg_address; /**< Address of Registers */
+ u32 core_nr; /**< Sequence number */
+ u32 flags; /**< Flags. Currently Unused. */
+ struct _mali_core_info * next; /**< Next core in Linked List */
+} _mali_core_info;
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+ _MALI_PP_READABLE = (1<<0), /**< Readable by the Fragment Processor */
+ _MALI_PP_WRITEABLE = (1<<1), /**< Writeable by the Fragment Processor */
+ _MALI_GP_READABLE = (1<<2), /**< Readable by the Vertex Processor */
+ _MALI_GP_WRITEABLE = (1<<3), /**< Writeable by the Vertex Processor */
+ _MALI_CPU_READABLE = (1<<4), /**< Readable by the CPU */
+ _MALI_CPU_WRITEABLE = (1<<5), /**< Writeable by the CPU */
+ _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /**< Readable by the MMU (including all cores behind it) */
+ _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /**< Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * Each bank is uniquely identified by its identifier member. On Mali-nonMMU
+ * systems, to allocate from this bank, the value of identifier must be passed
+ * as the type_id member of the _mali_uk_get_big_block_s arguments to
+ * _mali_ukk_get_big_block.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+ u32 size; /**< Size of the memory bank in bytes */
+ _mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /**< Unique identifier, to be used in allocate calls */
+ struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+/** @brief Info about the whole Mali system.
+ *
+ * This Contains a linked list of the cores and memory banks available. Each
+ * list pointer will remain inside the system_info buffer supplied in the
+ * _mali_uk_get_system_info_s arguments to a _mali_ukk_get_system_info call.
+ *
+ * The has_mmu member must be inspected to ensure the correct group of
+ * Memory function calls is obtained - that is, those for either Mali-MMU
+ * or Mali-nonMMU. @see _mali_uk_memory
+ */
+typedef struct _mali_system_info
+{
+ _mali_core_info * core_info; /**< List of _mali_core_info structures */
+ _mali_mem_info * mem_info; /**< List of _mali_mem_info structures */
+ u32 has_mmu; /**< Non-zero if Mali-MMU present. Zero otherwise. */
+ _mali_driver_mode drivermode; /**< Reserved. Must always be _MALI_DRIVER_MODE_NORMAL */
+} _mali_system_info;
+
+/** @brief Arguments to _mali_ukk_get_system_info()
+ *
+ * A buffer of the size returned by _mali_ukk_get_system_info_size() must be
+ * allocated, and the pointer to this buffer must be written into the
+ * system_info member. The buffer must be suitably aligned for storage of
+ * the _mali_system_info structure - for example, one returned by
+ * _mali_osk_malloc(), which will be suitably aligned for any structure.
+ *
+ * The ukk_private member must be set to zero by the user-side. Under an OS
+ * implementation, the U/K interface must write in the user-side base address
+ * into the ukk_private member, so that the common code in
+ * _mali_ukk_get_system_info() can determine how to adjust the pointers such
+ * that they are sensible from user space. Leaving ukk_private as NULL implies
+ * that no pointer adjustment is necessary - which will be the case on a
+ * bare-metal/RTOS system.
+ *
+ * @see _mali_system_info
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer provided to store system information data */
+ _mali_system_info * system_info; /**< [in,out] pointer to buffer to store system information data. No initialisation of buffer required on input. */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+} _mali_uk_get_system_info_s;
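+
+/* Illustrative only: a minimal sketch of the expected two-step query. It
+ * assumes 'ctx' was obtained from _mali_ukk_open(), that these U/K calls
+ * return _mali_osk_errcode_t, and that the caller frees the returned buffer
+ * with _mali_osk_free() when done:
+ *
+ * \code
+ * static _mali_system_info *example_query_system_info( void *ctx )
+ * {
+ *     _mali_uk_get_system_info_size_s size_args = {0};
+ *     _mali_uk_get_system_info_s info_args = {0};
+ *
+ *     size_args.ctx = ctx;
+ *     if ( _MALI_OSK_ERR_OK != _mali_ukk_get_system_info_size( &size_args ) ) return NULL;
+ *
+ *     info_args.ctx = ctx;
+ *     info_args.size = size_args.size;
+ *     info_args.system_info = _mali_osk_malloc( size_args.size );
+ *     info_args.ukk_private = 0; // must be zero on input
+ *     if ( NULL == info_args.system_info ) return NULL;
+ *
+ *     if ( _MALI_OSK_ERR_OK != _mali_ukk_get_system_info( &info_args ) )
+ *     {
+ *         _mali_osk_free( info_args.system_info );
+ *         return NULL;
+ *     }
+ *     return info_args.system_info;
+ * }
+ * \endcode
+ */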
+/** @} */ /* end group _mali_uk_getsysteminfo */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
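+
+/* Illustrative only: responding to an out-of-memory suspension by supplying a
+ * new heap. The cookie is assumed to come from the _mali_uk_gp_job_suspended_s
+ * notification received earlier, and new_heap_start/new_heap_end are
+ * hypothetical Mali addresses of the replacement heap:
+ *
+ * \code
+ * static void example_resume_with_new_heap( void *ctx, u32 cookie,
+ *                                           u32 new_heap_start, u32 new_heap_end )
+ * {
+ *     _mali_uk_gp_suspend_response_s response = {0};
+ *
+ *     response.ctx = ctx;       // from _mali_ukk_open()
+ *     response.cookie = cookie; // from the _mali_uk_gp_job_suspended_s notification
+ *     response.code = _MALIGP_JOB_RESUME_WITH_NEW_HEAP;
+ *     response.arguments[0] = new_heap_start;
+ *     response.arguments[1] = new_heap_end;
+ *
+ *     _mali_ukk_gp_suspend_response( &response );
+ * }
+ * \endcode
+ */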
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED, /**< Job started and bumped a lower priority job that was pending execution */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+
+typedef enum
+{
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1<<(16+1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1<<(16+2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1<<(16+4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and return the address of the @c mali_gp_job_info
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shuts down.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
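+ * A minimal sketch of starting a job and handling the possible start results
+ * (illustrative only; @c job, @c ctx and @c requeue() are assumed caller-side
+ * objects, and the call is shown as a direct function call as on a
+ * bare-metal/RTOS U/K implementation):
+ * @code
+ * _mali_uk_gp_start_job_s args = {0, };
+ * int i;
+ * args.ctx = ctx;                    // context from _mali_ukk_open()
+ * args.user_job_ptr = (u32)job;      // identifies our mali_gp_job_info
+ * args.priority = job->priority;
+ * args.watchdog_msecs = 0;           // 0 selects the driver default timeout
+ * for (i = 0; i < MALIGP2_NUM_REGS_FRAME; i++) args.frame_registers[i] = job->frame_registers[i];
+ * args.perf_counter_flag = 0;        // non-instrumented build: no counters
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_gp_start_job(&args))
+ * {
+ *     if (args.status == _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE)
+ *         requeue(args.user_job_ptr);                 // try this job again later
+ *     else if (args.status == _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED)
+ *         requeue(args.returned_user_job_ptr);        // requeue the bumped job
+ * }
+ * @endcode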
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space, a @c mali_gp_job_info* */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+ u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE (1<<2) /**< Enable performance counter L2_SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE (1<<3) /**< Enable performance counter L2_SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET (1<<4) /**< Enable performance counter L2_RESET for a job */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of the GP interrupt rawstat register (see ARM DDI0415A) */
+ u32 status_reg_on_stop; /**< [out] value of the GP control register */
+ u32 vscl_stop_addr; /**< [out] value of the GP VLSCL start register */
+ u32 plbcl_stop_addr; /**< [out] value of the GP PLBCL start register */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of milliseconds it took for the job to render */
+ u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+ _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and return the address of the @c mali_pp_job
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shuts down.
+ *
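+ * A minimal sketch (illustrative only; @c job and @c ctx are assumed
+ * caller-side objects, and the call is shown as a direct function call as on
+ * a bare-metal/RTOS U/K implementation):
+ * @code
+ * _mali_uk_pp_start_job_s args = {0, };
+ * args.ctx = ctx;                    // context from _mali_ukk_open()
+ * args.user_job_ptr = (u32)job;      // identifies our mali_pp_job
+ * args.priority = job->priority;
+ * args.watchdog_msecs = 0;           // 0 selects the driver default timeout
+ * // copy job's frame registers (and, for MALI200, the wb0/wb1/wb2 registers) into args here
+ * args.perf_counter_flag = 0;        // non-instrumented build: no counters
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_pp_start_job(&args) &&
+ *     args.status == _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE)
+ * {
+ *     // requeue the job and try starting it again later
+ * }
+ * @endcode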
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALI200_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job, see ARM DDI0415A */
+ u32 wb0_registers[MALI200_NUM_REGS_WBx];
+ u32 wb1_registers[MALI200_NUM_REGS_WBx];
+ u32 wb2_registers[MALI200_NUM_REGS_WBx];
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+ u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of interrupt rawstat register (see ARM DDI0415A) */
+ u32 last_tile_list_addr; /**< [out] value of renderer list register (see ARM DDI0415A); necessary to restart a stopped job */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of milliseconds it took for the job to render */
+ u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_job_finished_s;
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type value in the type
+ * member. The type member is encoded in a way that divides the types into a
+ * subsystem field and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start on the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning as for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
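+ *
+ * A minimal sketch of a notification loop (illustrative only; the handler
+ * functions are assumed caller-side code, and the call is shown as a direct
+ * function call):
+ * @code
+ * _mali_uk_wait_for_notification_s args = {0, };
+ * for (;;)
+ * {
+ *     args.ctx = ctx;   // context from _mali_ukk_open(); must be re-set before every call
+ *     if (_MALI_OSK_ERR_OK != _mali_ukk_wait_for_notification(&args)) break;
+ *     if (args.type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS) break;
+ *     switch (args.type)
+ *     {
+ *     case _MALI_NOTIFICATION_GP_FINISHED: handle_gp_finished(&args.data.gp_job_finished); break;
+ *     case _MALI_NOTIFICATION_GP_STALLED:  handle_gp_stalled(&args.data.gp_job_suspended); break;
+ *     case _MALI_NOTIFICATION_PP_FINISHED: handle_pp_finished(&args.data.pp_job_finished); break;
+ *     default: break;
+ *     }
+ * }
+ * @endcode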
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union
+ {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API.
+ * The version is a 16-bit integer incremented on each API change.
+ * The 16-bit integer is stored twice in a 32-bit integer.
+ * For example, for version 1 the value would be 0x00010001.
+ */
+#define _MALI_API_VERSION 8
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
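+ *
+ * A minimal sketch (illustrative only; the call is shown as a direct function
+ * call as on a bare-metal/RTOS U/K implementation):
+ * @code
+ * _mali_uk_get_api_version_s args = {0, };
+ * args.ctx = ctx;                       // context from _mali_ukk_open()
+ * args.version = _MALI_UK_API_VERSION;  // the API version this side was built against
+ * if (_MALI_OSK_ERR_OK != _mali_ukk_get_api_version(&args) || 0 == args.compatible)
+ * {
+ *     // the user-side and kernel-side interfaces are not compatible
+ * }
+ * @endcode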
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mali_address_base; /**< [out] start of MALI address space */
+ u32 memory_size; /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** @brief Arguments for _mali_ukk_get_big_block()
+ *
+ * - type_id should be set to the value of the identifier member of one of the
+ * _mali_mem_info structures returned through _mali_ukk_get_system_info()
+ * - ukk_private must be zero when calling from user-side. On Kernel-side, the
+ * OS implementation of the U/K interface can use it to communicate data to the
+ * OS implementation of the OSK layer. Specifically, ukk_private will be placed
+ * into the ukk_private member of the _mali_uk_mem_mmap_s structure. See
+ * _mali_ukk_mem_mmap() for more details.
+ * - minimum_size_requested will be updated if it is too small
+ * - block_size will always be >= minimum_size_requested, because the underlying
+ * allocation mechanism may only be able to divide up memory regions in certain
+ * ways. To avoid wasting memory, block_size should always be taken into account
+ * rather than assuming minimum_size_requested was really allocated.
+ * - to free the memory, the returned cookie member must be stored, and used to
+ * refer to it.
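+ *
+ * A minimal sketch, Mali-nonMMU only (illustrative; @c bank_id is an assumed
+ * value taken from the system info, and the calls are shown as direct
+ * function calls):
+ * @code
+ * _mali_uk_get_big_block_s alloc = {0, };
+ * alloc.ctx = ctx;                        // context from _mali_ukk_open()
+ * alloc.type_id = bank_id;                // identifier of a _mali_mem_info bank
+ * alloc.minimum_size_requested = 4096;
+ * alloc.ukk_private = 0;                  // must be zero when calling from user-side
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_get_big_block(&alloc))
+ * {
+ *     _mali_uk_free_big_block_s release = {0, };
+ *     // use alloc.cpuptr / alloc.mali_address, for alloc.block_size bytes
+ *     release.ctx = ctx;
+ *     release.cookie = alloc.cookie;      // the cookie identifies the block to free
+ *     _mali_ukk_free_big_block(&release);
+ * }
+ * @endcode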
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 type_id; /**< [in] the type id of the memory bank to allocate memory from */
+ u32 minimum_size_requested; /**< [in,out] minimum size of the allocation */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+ u32 mali_address; /**< [out] address of the allocation in mali address space */
+ void *cpuptr; /**< [out] address of the allocation in the current process address space */
+ u32 block_size; /**< [out] size of the block that got allocated */
+ u32 flags; /**< [out] flags associated with the allocated block, of type _mali_bus_usage */
+ u32 cookie; /**< [out] identifier for the allocated block in kernel space */
+} _mali_uk_get_big_block_s;
+
+/** @brief Arguments for _mali_ukk_free_big_block()
+ *
+ * All that is required is that the cookie member must be set to the value of
+ * the cookie member returned through _mali_ukk_get_big_block()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_free_big_block_s;
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s and _mali_uk_attach_ump_mem_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+/** @note Mali-MMU only; will be supported in future version */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
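+ *
+ * A minimal sketch (illustrative only; @c buffer and @c buffer_size describe
+ * an assumed caller-side allocation that is physically contiguous):
+ * @code
+ * _mali_uk_va_to_mali_pa_s args = {0, };
+ * args.ctx = ctx;            // context from _mali_ukk_open()
+ * args.va = buffer;          // will be rounded down to a page boundary
+ * args.size = buffer_size;   // will be rounded up to a multiple of the page size
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_va_to_mali_pa(&args))
+ * {
+ *     // args.pa now holds the physical base address of the (page-rounded) range
+ * }
+ * @endcode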
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Fragment Processor cores in the system */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_pp_abort_job_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_gp_abort_job_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
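+ *
+ * A minimal sketch for the Mali-MMU case (illustrative only; the Mali virtual
+ * base address shown is a placeholder chosen by the caller, and the calls are
+ * shown as direct function calls):
+ * @code
+ * _mali_uk_mem_mmap_s map = {0, };
+ * map.ctx = ctx;                     // context from _mali_ukk_open()
+ * map.size = 16 * 4096;
+ * map.phys_addr = mali_virtual_base; // MMU case: desired Mali virtual base address
+ * map.uku_private = NULL;            // reserved, must be zero
+ * map.ukk_private = NULL;            // must be zero when calling from user-space
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_mem_mmap(&map))
+ * {
+ *     _mali_uk_mem_munmap_s unmap = {0, };
+ *     // map.mapping is the CPU-visible mapping; keep map.cookie for the munmap call
+ *     unmap.ctx = ctx;
+ *     unmap.mapping = map.mapping;
+ *     unmap.size = map.size;         // must match the size passed to mmap
+ *     unmap.cookie = map.cookie;
+ *     _mali_ukk_mem_munmap(&unmap);
+ * }
+ * @endcode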
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+#if USING_MALI_PMM
+
+/** @defgroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power management event message identifiers.
+ *
+ * U/K events start after id 200, and can range up to 999
+ * Adding new events will require updates to the PMM mali_pmm_event_id type
+ */
+#define _MALI_PMM_EVENT_UK_EXAMPLE 201
+
+/** @brief Generic PMM message data type; its content depends on the event message
+ */
+typedef u32 mali_pmm_message_data;
+
+
+/** @brief Arguments to _mali_ukk_pmm_event_message()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 id; /**< [in] event id */
+ mali_pmm_message_data data; /**< [in] specific data associated with the event */
+} _mali_uk_pmm_message_s;
+
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when the DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue with the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
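+ * A minimal sketch (illustrative only; the entry-point name follows the brief
+ * above, and the call is shown as a direct function call):
+ * @code
+ * _mali_uk_vsync_event_report_s args = {0, };
+ * args.ctx = ctx;                               // context from _mali_ukk_open()
+ * args.event = _MALI_UK_VSYNC_EVENT_BEGIN_WAIT;
+ * _mali_ukk_vsync_event(&args);                 // report again with END_WAIT once vsync occurs
+ * @endcode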
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h
new file mode 100644
index 00000000000..d066046901d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing values into the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - On OS systems (not including direct function call U/K interface
+ * implementations), _mali_ukk_get_big_block() may succeed, but the subsequent
+ * copying to user space may fail.
+ * - A problem scenario exists: some memory has been reserved by
+ * _mali_ukk_get_big_block(), but the user-mode will be unaware of it (it will
+ * never receive any information about this memory). In this case, the U/K
+ * implementation must do everything necessary to 'rollback' the \em atomic
+ * _mali_ukk_get_big_block() transaction.
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
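+ *
+ * A minimal session sketch (illustrative only; shown as direct function calls,
+ * with _mali_uk_get_gp_number_of_cores_s used purely as an example argument
+ * structure):
+ * @code
+ * void *ctx = NULL;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_open(&ctx))
+ * {
+ *     _mali_uk_get_gp_number_of_cores_s args = {0, };
+ *     args.ctx = ctx;   // copy the stored handle into every argument structure
+ *     (void)_mali_ukk_get_gp_number_of_cores(&args);
+ *     _mali_ukk_close(&ctx);   // the context must not be used after this
+ * }
+ * @endcode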
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Returns the size of the buffer needed for a _mali_ukk_get_system_info call
+ *
+ * This function must be called before a call is made to
+ * _mali_ukk_get_system_info, so that memory of the correct size can be
+ * allocated, and a pointer to this memory written into the system_info member
+ * of _mali_uk_get_system_info_s.
+ *
+ * @param args see _mali_uk_get_system_info_size_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size( _mali_uk_get_system_info_size_s *args );
+
+/** @brief Returns information about the system (cores and memory banks)
+ *
+ * A buffer for this needs to be allocated by the caller. The size of the buffer required is returned by
+ * _mali_ukk_get_system_info_size(). The user is responsible for freeing the buffer.
+ *
+ * The _mali_system_info structure will be written to the start of this buffer,
+ * and the core_info and mem_info lists will be written to locations inside
+ * the buffer, and will be suitably aligned.
+ *
+ * Under OS implementations of the U/K interface we need to pack/unpack
+ * pointers across the user/kernel boundary. This has required that we malloc()
+ * an intermediate buffer inside the kernel-side U/K interface, and free it
+ * before returning to user-side. To avoid modifying common code, we do the
+ * following pseudo-code, which we shall call 'pointer switching':
+ *
+ * @code
+ * {
+ * Copy_From_User(kargs, args, ... );
+ * void __user * local_ptr = kargs->system_info;
+ * kargs->system_info = _mali_osk_malloc( ... );
+ * _mali_ukk_get_system_info( kargs );
+ * Copy_To_User( local_ptr, kargs->system_info, ... );
+ * _mali_osk_free( kargs->system_info );
+ * }
+ * @endcode
+ * @note The user-side's args->system_info member is left unmodified here.
+ *
+ * However, the current implementation requires an extra ukk_private word so that the common code can work out
+ * how to patch pointers to user-mode for an OS's U/K implementation. This should be set to the user-space
+ * destination address for pointer-patching to occur. When NULL, it is unused, and no pointer-patching occurs in the
+ * common code.
+ *
+ * @param args see _mali_uk_get_system_info_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args );
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map a block of memory into the current user process
+ *
+ * Allocates a minimum of minimum_size_requested bytes of MALI memory and maps it into the current
+ * process space. The number of bytes allocated is returned in args->block_size.
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_get_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/** @brief Unmap a block of memory from the current user process
+ *
+ * Frees allocated MALI memory and unmaps it from the current process space. The previously allocated memory
+ * is indicated by the cookie as returned by _mali_ukk_get_big_block().
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_free_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode. In Mali-nonMMU mode, the function is callable
+ * from the kernel side, and is used to implement _mali_ukk_get_big_block() in this case.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() uses the _mali_uk_mem_mmap_s::phys_addr
+ * member for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * This means that in the first (nonMMU) case, the caller must manage the physical address allocations. The caller
+ * in this case is _mali_ukk_get_big_block(), which does indeed manage the Mali physical address ranges.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
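+
+/* Illustrative kernel-side pairing of the two calls above - a sketch only, not
+ * part of the original driver. Only the phys_addr member (the desired
+ * Mali-virtual address in MMU mode) is documented here; every other member
+ * name, and the mali_virtual_addr variable, are assumptions:
+ *
+ *   _mali_uk_mem_mmap_s map_args = {0};
+ *   _mali_uk_mem_munmap_s unmap_args = {0};
+ *
+ *   map_args.phys_addr = mali_virtual_addr;
+ *   if( _MALI_OSK_ERR_OK == _mali_ukk_mem_mmap( &map_args ) )
+ *   {
+ *       ... program Mali structures using the Mali-virtual range, then fill
+ *           unmap_args from the established mapping and release it: ...
+ *       _mali_ukk_mem_munmap( &unmap_args );
+ *   }
+ */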
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
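+
+/* Illustrative two-step use of the dump interface above - a sketch only, not
+ * part of the original driver; the size and buffer member names are assumptions
+ * (see the argument structs in "mali_uk_types.h"):
+ *
+ *   _mali_uk_query_mmu_page_table_dump_size_s query_args = {0};
+ *   _mali_uk_dump_mmu_page_table_s dump_args = {0};
+ *
+ *   if( _MALI_OSK_ERR_OK == _mali_ukk_query_mmu_page_table_dump_size( &query_args ) )
+ *   {
+ *       dump_args.size = query_args.size;
+ *       dump_args.buffer = _mali_osk_malloc( query_args.size );
+ *       if( dump_args.buffer ) _mali_ukk_dump_mmu_page_table( &dump_args );
+ *   }
+ */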
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the nearest page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded, modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note If implemented, this function is entirely platform-dependent, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
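+
+/* Worked example of the rounding described above, assuming PAGE_SIZE is 4096:
+ * a request with va = 0x1234 and size = 0x100 is rounded down to va = 0x1000,
+ * the remainder 0x234 is added to give size = 0x334, and size is then rounded
+ * up to the next multiple of PAGE_SIZE, giving 0x1000. The translation is
+ * therefore performed on the range [0x1000, 0x2000) and the rounded values are
+ * written back into args.
+ */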
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving the version of the fragment processors
+ * - determining the number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned; otherwise, the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a
+ * pointer to the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_pp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args );
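+
+/* Illustrative caller-side handling of the statuses described above - a sketch
+ * only, not part of the original driver. Only args.status and
+ * args.returned_user_job_ptr are documented here; requeue_returned_job() and
+ * retry_start_later() are hypothetical placeholders for the caller's own logic,
+ * and completion is then awaited with _mali_ukk_wait_for_notification():
+ *
+ *   if( _MALI_OSK_ERR_OK == _mali_ukk_pp_start_job( &args ) )
+ *   {
+ *       switch( args.status )
+ *       {
+ *           case _MALI_UK_START_JOB_STARTED:
+ *               break;
+ *           case _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED:
+ *               requeue_returned_job( args.returned_user_job_ptr );
+ *               break;
+ *           case _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE:
+ *               retry_start_later( &args );
+ *               break;
+ *       }
+ *   }
+ */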
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Abort any PP jobs with the given ID.
+ *
+ * Jobs internally queued or currently running on the hardware are to be stopped/aborted
+ * immediately. Aborted jobs are reported via the normal job completion and notification
+ * procedures.
+ *
+ * @param args see _mali_uk_pp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving the version of the Vertex Processors
+ * - determining the number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned; otherwise, the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a pointer to
+ * the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_gp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
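+
+/* Illustrative flow for the call above - a sketch only, not part of the
+ * original driver; the member names of _mali_uk_gp_suspend_response_s are
+ * assumptions (see "mali_uk_types.h"). After _mali_ukk_wait_for_notification()
+ * reports a suspended Vertex Processor job:
+ *
+ *   _mali_uk_gp_suspend_response_s resp = {0};
+ *
+ *   resp.cookie = cookie_from_the_suspend_notification;
+ *   resp.code   = resume_or_abort_decision;
+ *   _mali_ukk_gp_suspend_response( &resp );
+ */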
+
+/** @brief Abort any GP jobs with the given ID.
+ *
+ * Jobs internally queued or currently running on the hardware are to be stopped/aborted
+ * immediately. Aborted jobs are reported via the normal job completion and notification
+ * procedures.
+ *
+ * @param args see _mali_uk_gp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_gp */
+
+#if USING_MALI_PMM
+/** @addtogroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power Management Module event message
+ *
+ * @note The event message can fail to be sent due to OOM but this is
+ * stored in the PMM state machine to be handled later
+ *
+ * @param args see _mali_uk_pmm_event_message_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args );
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
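+
+/* Illustrative reporting pattern for the note above - a sketch only, not part
+ * of the original driver; the event member name and its begin/end wait values
+ * are assumptions (see _mali_uk_vsync_event_report_s in "mali_uk_types.h"):
+ *
+ *   _mali_uk_vsync_event_report_s report = {0};
+ *
+ *   report.event = vsync_begin_wait_event;
+ *   _mali_ukk_vsync_event_report( &report );
+ *   ... block until the vsync arrives ...
+ *   report.event = vsync_end_wait_event;
+ *   _mali_ukk_vsync_event_report( &report );
+ */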
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c
new file mode 100644
index 00000000000..eeb29589ddc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.c
+ * Implementation of the power management module for the kernel device driver
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_platform.h"
+
+/* Internal PMM subsystem state */
+static _mali_pmm_internal_state_t *pmm_state = NULL;
+/* Mali kernel subsystem id */
+static mali_kernel_subsystem_identifier mali_subsystem_pmm_id = -1;
+
+#define GET_PMM_STATE_PTR (pmm_state)
+
+/* Internal functions */
+static _mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource);
+static void pmm_event_process( void );
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data);
+void malipmm_irq_bhandler(void *data);
+
+/** @brief Start the PMM subsystem
+ *
+ * @param id Subsystem id to uniquely identify this subsystem
+ * @return _MALI_OSK_ERR_OK if the system started successfully, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id );
+
+/** @brief Perform post start up of the PMM subsystem
+ *
+ * Post start up includes initializing the current policy, now that the system is
+ * completely started, to stop policies from turning off hardware during start up
+ *
+ * @param id the unique subsystem id
+ * @return _MALI_OSK_ERR_OK if the post startup was successful, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id );
+
+/** @brief Terminate the PMM subsystem
+ *
+ * @param id the unique subsystem id
+ */
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id );
+
+#if MALI_STATE_TRACKING
+ void malipmm_subsystem_dump_state( void );
+#endif
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+struct mali_kernel_subsystem mali_subsystem_pmm=
+{
+ malipmm_kernel_subsystem_start, /* startup */
+ malipmm_kernel_subsystem_terminate, /* shutdown */
+ malipmm_kernel_load_complete, /* loaded all subsystems */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+#if MALI_STATE_TRACKING
+ malipmm_subsystem_dump_state, /* dump_state */
+#endif
+};
+
+#if PMM_OS_TEST
+
+u32 power_test_event = 0;
+mali_bool power_test_flag = MALI_FALSE;
+_mali_osk_timer_t *power_test_timer = NULL;
+
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+ MALI_PRINT(("POWER TEST OS UP DONE\n"));
+}
+
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+ MALI_PRINT(("POWER TEST OS DOWN DONE\n"));
+}
+
+/**
+ * Timer callback used by the power test to simulate OS power up/down calls to the driver
+ */
+void power_test_callback( void *arg )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ power_test_flag = MALI_TRUE;
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+void power_test_start()
+{
+ power_test_timer = _mali_osk_timer_init();
+ _mali_osk_timer_setcallback( power_test_timer, power_test_callback, NULL );
+
+ /* First event is power down */
+ power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+ _mali_osk_timer_add( power_test_timer, 10000 );
+}
+
+mali_bool power_test_check()
+{
+ if( power_test_flag )
+ {
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ 0,
+ 1 };
+ event.id = power_test_event;
+
+ power_test_flag = MALI_FALSE;
+
+ /* Send event */
+ _mali_ukk_pmm_event_message( &event );
+
+ /* Switch to next event to test */
+ if( power_test_event == MALI_PMM_EVENT_OS_POWER_DOWN )
+ {
+ power_test_event = MALI_PMM_EVENT_OS_POWER_UP;
+ }
+ else
+ {
+ power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+ }
+ _mali_osk_timer_add( power_test_timer, 5000 );
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+void power_test_end()
+{
+ _mali_osk_timer_del( power_test_timer );
+ _mali_osk_timer_term( power_test_timer );
+ power_test_timer = NULL;
+}
+
+#endif
+
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ _mali_osk_notification_t *msg;
+ mali_pmm_message_t *event;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: sending message\n") );
+
+#if MALI_PMM_TRACE && MALI_PMM_TRACE_SENT_EVENTS
+ _mali_pmm_trace_event_message( args, MALI_FALSE );
+#endif
+
+ msg = _mali_osk_notification_create( MALI_PMM_NOTIFICATION_TYPE, sizeof( mali_pmm_message_t ) );
+
+ if( msg )
+ {
+ event = (mali_pmm_message_t *)msg->result_buffer;
+ event->id = args->id;
+ event->ts = _mali_osk_time_tickcount();
+ event->data = args->data;
+
+ _mali_osk_atomic_inc( &(pmm->messages_queued) );
+
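+		/* Route the message by id: ids above MALI_PMM_EVENT_INTERNALS are internal
+		 * PMM messages and go to the internal queue (iqueue), which the event
+		 * processor drains with priority; anything else is a real OS/job event and
+		 * goes to the main queue. */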
+ if( args->id > MALI_PMM_EVENT_INTERNALS )
+ {
+ /* Internal PMM message */
+ _mali_osk_notification_queue_send( pmm->iqueue, msg );
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->imessages_sent++;
+ #endif
+ }
+ else
+ {
+ /* Real event */
+ _mali_osk_notification_queue_send( pmm->queue, msg );
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->messages_sent++;
+ #endif
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Could not send message %d", args->id) );
+ /* Make note of this OOM - which has caused a missed event */
+ pmm->missed++;
+ }
+
+ /* Schedule time to look at the event or the fact we couldn't create an event */
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+mali_pmm_state _mali_pmm_state( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm && (mali_subsystem_pmm_id != -1) )
+ {
+ return pmm->state;
+ }
+
+ /* No working subsystem yet */
+ return MALI_PMM_STATE_UNAVAILABLE;
+}
+
+
+mali_pmm_core_mask _mali_pmm_cores_list( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return pmm->cores_registered;
+}
+
+mali_pmm_core_mask _mali_pmm_cores_powered( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return pmm->cores_powered;
+}
+
+
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available )
+{
+ /* TBD - This is currently a stub function for basic power management */
+
+ MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy )
+{
+ /* TBD - This is currently a stub function for basic power management */
+
+/* TBD - When this is not a stub... include tracing...
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_policy_change( old, newpolicy );
+#endif
+*/
+ MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy )
+{
+ if( policy )
+ {
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm )
+ {
+ *policy = pmm->policy;
+ MALI_SUCCESS;
+ }
+ else
+ {
+ *policy = MALI_PMM_POLICY_NONE;
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+ }
+
+ /* No return argument */
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+}
+
+#if MALI_PMM_TRACE
+
+/* Event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events[] = {
+ "OS_POWER_UP",
+ "OS_POWER_DOWN",
+ "JOB_SCHEDULED",
+ "JOB_QUEUED",
+ "JOB_FINISHED",
+ "TIMEOUT",
+};
+
+/* UK event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events_uk[] = {
+ "UKS",
+ "UK_EXAMPLE",
+};
+
+/* Internal event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events_internal[] = {
+ "INTERNALS",
+ "INTERNAL_POWER_UP_ACK",
+ "INTERNAL_POWER_DOWN_ACK",
+};
+
+/* State names - order must match mali_pmm_state enum */
+static char *pmm_trace_state[] = {
+ "UNAVAILABLE",
+ "SYSTEM ON",
+ "SYSTEM OFF",
+ "SYSTEM TRANSITION",
+};
+
+/* Policy names - order must match mali_pmm_policy enum */
+static char *pmm_trace_policy[] = {
+ "NONE",
+ "ALWAYS ON",
+ "JOB CONTROL",
+};
+
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate )
+{
+ const char *dname;
+ const char *cname;
+ const char *ename;
+
+ if( old != newstate )
+ {
+ if( newstate == 0 )
+ {
+ dname = "NO cores";
+ }
+ else
+ {
+ dname = pmm_trace_get_core_name( newstate );
+ }
+
+		/* These state checks only work if cores are only turned on or turned off in
+		 * separate actions. If core power states can be toggled (some on, some off)
+		 * at the same time, this check does not work
+		 */
+ if( old > newstate )
+ {
+ /* Cores have turned off */
+ cname = pmm_trace_get_core_name( old - newstate );
+ ename = "OFF";
+ }
+ else
+ {
+ /* Cores have turned on */
+ cname = pmm_trace_get_core_name( newstate - old );
+ ename = "ON";
+ }
+ MALI_PRINT( ("PMM Trace: Hardware %s ON, %s just turned %s. { 0x%08x -> 0x%08x }", dname, cname, ename, old, newstate) );
+ }
+}
+
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate )
+{
+ if( old != newstate )
+ {
+ MALI_PRINT( ("PMM Trace: State changed from %s to %s", pmm_trace_state[old], pmm_trace_state[newstate]) );
+ }
+}
+
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy )
+{
+ if( old != newpolicy )
+ {
+ MALI_PRINT( ("PMM Trace: Policy changed from %s to %s", pmm_trace_policy[old], pmm_trace_policy[newpolicy]) );
+ }
+}
+
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received )
+{
+ const char *ename;
+ const char *dname;
+ const char *tname;
+ const char *format = "PMM Trace: Event %s { (%d) %s, %d ticks, (0x%x) %s }";
+
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ tname = (received) ? "received" : "sent";
+
+ if( event->id >= MALI_PMM_EVENT_INTERNALS )
+ {
+ ename = pmm_trace_events_internal[((int)event->id) - MALI_PMM_EVENT_INTERNALS];
+ }
+ else if( event->id >= MALI_PMM_EVENT_UKS )
+ {
+ ename = pmm_trace_events_uk[((int)event->id) - MALI_PMM_EVENT_UKS];
+ }
+ else
+ {
+ ename = pmm_trace_events[event->id];
+ }
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ dname = "os event";
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ dname = pmm_trace_get_core_name( (mali_pmm_core_mask)event->data );
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ dname = "timeout start";
+ /* Print data with a different format */
+ format = "PMM Trace: Event %s { (%d) %s, %d ticks, %d ticks %s }";
+ break;
+ default:
+ dname = "unknown data";
+ }
+
+ MALI_PRINT( (format, tname, (u32)event->id, ename, event->ts, (u32)event->data, dname) );
+}
+
+#endif /* MALI_PMM_TRACE */
+
+
+/****************** Mali Kernel API *****************/
+
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id )
+{
+ mali_subsystem_pmm_id = id;
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(PMU, malipmm_create));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource)
+{
+ /* Create PMM state memory */
+ MALI_DEBUG_ASSERT( pmm_state == NULL );
+ pmm_state = (_mali_pmm_internal_state_t *) _mali_osk_malloc(sizeof(*pmm_state));
+ MALI_CHECK_NON_NULL( pmm_state, _MALI_OSK_ERR_NOMEM );
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pmm_state, 0, sizeof(*pmm_state));
+
+ /* Set up the initial PMM state */
+ pmm_state->waiting = 0;
+ pmm_state->status = MALI_PMM_STATUS_IDLE;
+ pmm_state->state = MALI_PMM_STATE_UNAVAILABLE; /* Until a core registers */
+
+ /* Set up policy via compile time option for the moment */
+#if MALI_PMM_ALWAYS_ON
+ pmm_state->policy = MALI_PMM_POLICY_ALWAYS_ON;
+#else
+ pmm_state->policy = MALI_PMM_POLICY_JOB_CONTROL;
+#endif
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_policy_change( MALI_PMM_POLICY_NONE, pmm_state->policy );
+#endif
+
+ /* Set up assumes all values are initialized to NULL or MALI_FALSE, so
+ * we can exit halfway through set up and perform clean up
+ */
+#if !MALI_PMM_NO_PMU
+ if( mali_platform_init(resource) != _MALI_OSK_ERR_OK ) goto pmm_fail_cleanup;
+ pmm_state->pmu_initialized = MALI_TRUE;
+#endif
+
+ pmm_state->queue = _mali_osk_notification_queue_init();
+ if( !pmm_state->queue ) goto pmm_fail_cleanup;
+
+ pmm_state->iqueue = _mali_osk_notification_queue_init();
+ if( !pmm_state->iqueue ) goto pmm_fail_cleanup;
+
+ /* We are creating an IRQ handler just for the worker thread it gives us */
+ pmm_state->irq = _mali_osk_irq_init( _MALI_OSK_IRQ_NUMBER_PMM,
+ malipmm_irq_uhandler,
+ malipmm_irq_bhandler,
+ NULL,
+ NULL,
+ (void *)pmm_state, /* PMM state is passed to IRQ */
+ "PMM handler" );
+
+ if( !pmm_state->irq ) goto pmm_fail_cleanup;
+
+ pmm_state->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 75);
+ if( !pmm_state->lock ) goto pmm_fail_cleanup;
+
+ if( _mali_osk_atomic_init( &(pmm_state->messages_queued), 0 ) != _MALI_OSK_ERR_OK )
+ {
+ goto pmm_fail_cleanup;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem created, policy=%d\n", pmm_state->policy) );
+
+ MALI_SUCCESS;
+
+pmm_fail_cleanup:
+ MALI_PRINT_ERROR( ("PMM: subsystem failed to be created\n") );
+ if( pmm_state )
+ {
+ _mali_osk_resource_type_t t = PMU;
+ if( pmm_state->lock ) _mali_osk_lock_term( pmm_state->lock );
+ if( pmm_state->irq ) _mali_osk_irq_term( pmm_state->irq );
+ if( pmm_state->queue ) _mali_osk_notification_queue_term( pmm_state->queue );
+ if( pmm_state->iqueue ) _mali_osk_notification_queue_term( pmm_state->iqueue );
+ if( pmm_state->pmu_initialized ) ( mali_platform_deinit(&t) );
+ _mali_osk_free(pmm_state);
+ pmm_state = NULL;
+ }
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem loaded, policy initializing\n") );
+
+#if PMM_OS_TEST
+ power_test_start();
+#endif
+
+	/* Initialize the policy now that the system has loaded, so that cores are
+	 * not turned off during start up
+	 */
+ return pmm_policy_init( pmm );
+}
+
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id )
+{
+ /* Check this is the right system */
+ MALI_DEBUG_ASSERT( id == mali_subsystem_pmm_id );
+ MALI_DEBUG_ASSERT_POINTER(pmm_state);
+
+ if( pmm_state )
+ {
+ _mali_osk_resource_type_t t = PMU;
+#if PMM_OS_TEST
+ power_test_end();
+#endif
+ /* Get the lock so we can shutdown */
+ MALI_PMM_LOCK(pmm_state);
+#if MALI_STATE_TRACKING
+ pmm_state->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+ pmm_state->status = MALI_PMM_STATUS_OFF;
+#if MALI_STATE_TRACKING
+ pmm_state->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+ MALI_PMM_UNLOCK(pmm_state);
+ pmm_policy_term(pmm_state);
+ _mali_osk_irq_term( pmm_state->irq );
+ _mali_osk_notification_queue_term( pmm_state->queue );
+ _mali_osk_notification_queue_term( pmm_state->iqueue );
+ if( pmm_state->pmu_initialized ) mali_platform_deinit(&t);
+ _mali_osk_atomic_term( &(pmm_state->messages_queued) );
+ MALI_PMM_LOCK_TERM(pmm_state);
+ _mali_osk_free(pmm_state);
+ pmm_state = NULL;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem terminated\n") );
+}
+
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core )
+{
+ _mali_osk_errcode_t err;
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+ if( pmm == NULL )
+ {
+		/* PMM state has not been created because the PMU resource has not been
+		 * created yet.
+		 * This probably means that the PMU resource has not been specified as the first
+		 * resource in the config file
+		 */
+ MALI_PRINT_ERROR( ("PMM: Cannot register core %s because the PMU resource has not been\n initialized. Please make sure the PMU resource is the first resource in the\n resource configuration.\n",
+ pmm_trace_get_core_name(core)) );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Check if the core is registered more than once in PMM */
+ MALI_DEBUG_ASSERT( (pmm->cores_registered & core) == 0 );
+
+ MALIPMM_DEBUG_PRINT( ("PMM: core registered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+#if !MALI_PMM_NO_PMU
+ /* Make sure the core is powered up */
+ err = mali_platform_powerup( core );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+ if( _MALI_OSK_ERR_OK == err )
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Assume a registered core is now powered up and idle */
+ pmm->cores_registered |= core;
+ pmm->cores_idle |= core;
+ pmm->cores_powered |= core;
+ pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Error(%d) powering up registered core: (0x%x) %s\n",
+ err, core, pmm_trace_get_core_name(core)) );
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+
+ return err;
+}
+
+void malipmm_core_unregister( mali_pmm_core_id core )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Check if the core is registered in PMM */
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, core );
+
+ MALIPMM_DEBUG_PRINT( ("PMM: core unregistered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+
+#if !MALI_PMM_NO_PMU
+ /* Turn off the core */
+ if( mali_platform_powerdown( core ) != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Error powering down unregistered core: (0x%x) %s\n",
+ core, pmm_trace_get_core_name(core)) );
+ }
+#endif
+
+ /* Remove the core from the system */
+ pmm->cores_registered &= (~core);
+ pmm->cores_idle &= (~core);
+ pmm->cores_powered &= (~core);
+ pmm->cores_pend_down &= (~core);
+ pmm->cores_pend_up &= (~core);
+ pmm->cores_ack_down &= (~core);
+ pmm->cores_ack_up &= (~core);
+
+ pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+}
+void malipmm_core_power_down_okay( mali_pmm_core_id core )
+{
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK,
+ 0 };
+
+ event.data = core;
+
+ _mali_ukk_pmm_event_message( &event );
+}
+
+void malipmm_set_policy_check()
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ pmm->check_policy = MALI_TRUE;
+
+ /* To check the policy we need to schedule some work */
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data)
+{
+ MALIPMM_DEBUG_PRINT( ("PMM: uhandler - not expected to be used\n") );
+
+ MALI_SUCCESS;
+}
+
+void malipmm_irq_bhandler(void *data)
+{
+ _mali_pmm_internal_state_t *pmm;
+ pmm = (_mali_pmm_internal_state_t *)data;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+#if PMM_OS_TEST
+ if( power_test_check() ) return;
+#endif
+
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* Quick out when we are shutting down */
+ if( pmm->status == MALI_PMM_STATUS_OFF )
+ {
+
+ #if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+ #endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ return;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: bhandler - Processing event\n") );
+
+ if( pmm->missed > 0 )
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to send %d events", pmm->missed) );
+ pmm_fatal_reset( pmm );
+ }
+
+ if( pmm->check_policy )
+ {
+ pmm->check_policy = MALI_FALSE;
+ pmm_policy_check_policy(pmm);
+ }
+ else
+ {
+ /* Perform event processing */
+ pmm_event_process();
+ if( pmm->fatal_power_err )
+ {
+ /* Try a reset */
+ pmm_fatal_reset( pmm );
+ }
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+}
+
+static void pmm_event_process( void )
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_notification_t *msg = NULL;
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ mali_pmm_message_t *event;
+ u32 process_messages;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+
+ /* Max number of messages to process before exiting - as we shouldn't stay
+ * processing the messages for a long time
+ */
+ process_messages = _mali_osk_atomic_read( &(pmm->messages_queued) );
+
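+	/* Drain up to the snapshotted number of messages. The internal queue (iqueue)
+	 * is always checked first so that power up/down acknowledgements are handled
+	 * before new events; the main queue is only consulted while the PMM is idle,
+	 * waiting for the OS, or paused for DVFS - otherwise we note that an event is
+	 * waiting and exit until the pending internal message arrives.
+	 */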
+ while( process_messages > 0 )
+ {
+ /* Check internal message queue first */
+ err = _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg );
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ if( pmm->status == MALI_PMM_STATUS_IDLE || pmm->status == MALI_PMM_STATUS_OS_WAITING || pmm->status == MALI_PMM_STATUS_DVFS_PAUSE)
+ {
+ if( pmm->waiting > 0 ) pmm->waiting--;
+
+ /* We aren't busy changing state, so look at real events */
+ err = _mali_osk_notification_queue_dequeue( pmm->queue, &msg );
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ pmm->no_events++;
+ MALIPMM_DEBUG_PRINT( ("PMM: event_process - No message to process\n") );
+ /* Nothing to do - so return */
+ return;
+ }
+ else
+ {
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->messages_received++;
+ #endif
+ }
+ }
+ else
+ {
+ /* Waiting for an internal message */
+ pmm->waiting++;
+ MALIPMM_DEBUG_PRINT( ("PMM: event_process - Waiting for internal message, messages queued=%d\n", pmm->waiting) );
+ return;
+ }
+ }
+ else
+ {
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->imessages_received++;
+ #endif
+ }
+
+ MALI_DEBUG_ASSERT_POINTER( msg );
+ /* Check the message type matches */
+ MALI_DEBUG_ASSERT( msg->notification_type == MALI_PMM_NOTIFICATION_TYPE );
+
+ event = msg->result_buffer;
+
+ _mali_osk_atomic_dec( &(pmm->messages_queued) );
+ process_messages--;
+
+ #if MALI_PMM_TRACE
+ /* Trace before we process the event in case we have an error */
+ _mali_pmm_trace_event_message( event, MALI_TRUE );
+ #endif
+ err = pmm_policy_process( pmm, event );
+
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Error(%d) in policy %d when processing event message with id: %d",
+ err, pmm->policy, event->id) );
+ }
+
+ /* Delete notification */
+ _mali_osk_notification_delete ( msg );
+
+ if( pmm->fatal_power_err )
+ {
+ /* Nothing good has happened - exit */
+ return;
+ }
+
+
+ #if MALI_PMM_TRACE
+ MALI_PRINT( ("PMM Trace: Event processed, msgs (sent/read) = %d/%d, int msgs (sent/read) = %d/%d, no events = %d, waiting = %d\n",
+ pmm->messages_sent, pmm->messages_received, pmm->imessages_sent, pmm->imessages_received, pmm->no_events, pmm->waiting) );
+ #endif
+ }
+
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->waiting > 0 )
+ {
+ /* For events we ignored whilst we were busy, add a new
+ * scheduled time to look at them */
+ _mali_osk_irq_schedulework( pmm->irq );
+ }
+}
+
+#if MALI_STATE_TRACKING
+void malipmm_subsystem_dump_state(void)
+{
+ malipmm_state_dump();
+}
+#endif
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+void malipmm_state_dump()
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+ if( !pmm )
+ {
+ MALI_PRINT(("PMM: Null state\n"));
+ }
+ else
+ {
+ MALI_PRINT(("Locks::\nPMM_LOCK_STATUS=%ld",pmm->mali_pmm_lock_acquired));
+ MALI_PRINT(("PMM state:\nPrevious_status=%d\nstatus=%d\nCurrent_event=%d\npolicy=%d\ncheck_policy=%d\nstate=%d\n", pmm->mali_last_pmm_status,pmm->status, pmm->mali_new_event_status, pmm->policy, pmm->check_policy, pmm->state));
+ MALI_PRINT(("PMM cores:\ncores_registered=%d\ncores_powered=%d\ncores_idle=%d\ncores_pend_down=%d\ncores_pend_up=%d\ncores_ack_down=%d\ncores_ack_up=%d\n", pmm->cores_registered, pmm->cores_powered, pmm->cores_idle, pmm->cores_pend_down, pmm->cores_pend_up, pmm->cores_ack_down, pmm->cores_ack_up));
+ MALI_PRINT(("PMM misc:\npmu_init=%d\nmessages_queued=%d\nwaiting=%d\nno_events=%d\nmissed=%d\nfatal_power_err=%d\n", pmm->pmu_initialized, _mali_osk_atomic_read( &(pmm->messages_queued) ), pmm->waiting, pmm->no_events, pmm->missed, pmm->fatal_power_err));
+ }
+}
+#endif
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h
new file mode 100644
index 00000000000..fe7a046cb47
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.h
+ * Defines the power management module for the kernel device driver
+ */
+
+#ifndef __MALI_PMM_H__
+#define __MALI_PMM_H__
+
+/* For mali_pmm_message_data and MALI_PMM_EVENT_UK_* defines */
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @defgroup pmmapi Power Management Module APIs
+ *
+ * @{
+ */
+
+/** OS event tester */
+#define PMM_OS_TEST 0
+
+/** @brief Compile option to turn on/off tracing */
+#define MALI_PMM_TRACE 0
+#define MALI_PMM_TRACE_SENT_EVENTS 0
+
+/** @brief Compile option to switch between always on or job control PMM policy */
+#define MALI_PMM_ALWAYS_ON 0
+
+/** @brief Overrides hardware PMU and uses software simulation instead
+ * @note This also prevents initialization of the PMU and stops cores from being powered on at start up
+ */
+#define MALI_PMM_NO_PMU 0
+
+/** @brief PMM debug print to control debug message level */
+#define MALIPMM_DEBUG_PRINT(args) \
+ MALI_DEBUG_PRINT(3, args)
+
+
+/** @brief power management event message identifiers.
+ */
+/* These must match up with the pmm_trace_events & pmm_trace_events_internal
+ * arrays
+ */
+typedef enum mali_pmm_event_id
+{
+ MALI_PMM_EVENT_OS_POWER_UP = 0, /**< OS power up event */
+ MALI_PMM_EVENT_OS_POWER_DOWN = 1, /**< OS power down event */
+ MALI_PMM_EVENT_JOB_SCHEDULED = 2, /**< Job scheduled to run event */
+ MALI_PMM_EVENT_JOB_QUEUED = 3, /**< Job queued (but not run) event */
+ MALI_PMM_EVENT_JOB_FINISHED = 4, /**< Job finished event */
+ MALI_PMM_EVENT_TIMEOUT = 5, /**< Time out timer has expired */
+ MALI_PMM_EVENT_DVFS_PAUSE = 6, /**< Mali device pause event */
+ MALI_PMM_EVENT_DVFS_RESUME = 7, /**< Mali device resume event */
+
+ MALI_PMM_EVENT_UKS = 200, /**< Events from the user-side start here */
+ MALI_PMM_EVENT_UK_EXAMPLE = _MALI_PMM_EVENT_UK_EXAMPLE,
+
+ MALI_PMM_EVENT_INTERNALS = 1000,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK = 1001, /**< Internal power up acknowledgement */
+	MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK = 1002, /**< Internal power down acknowledgement */
+} mali_pmm_event_id;
+
+
+/** @brief Use this when the power up/down callbacks do not need any OS data. */
+#define MALI_PMM_NO_OS_DATA 1
+
+
+/** @brief Geometry and pixel processor identifiers for the PMM
+ *
+ * @note these match the ARM Mali 400 PMU hardware definitions, apart from the "SYSTEM"
+ */
+typedef enum mali_pmm_core_id_tag
+{
+ MALI_PMM_CORE_SYSTEM = 0x00000000, /**< All of the Mali hardware */
+ MALI_PMM_CORE_GP = 0x00000001, /**< Mali GP2 */
+ MALI_PMM_CORE_L2 = 0x00000002, /**< Level 2 cache */
+ MALI_PMM_CORE_PP0 = 0x00000004, /**< Mali 200 pixel processor 0 */
+ MALI_PMM_CORE_PP1 = 0x00000008, /**< Mali 200 pixel processor 1 */
+ MALI_PMM_CORE_PP2 = 0x00000010, /**< Mali 200 pixel processor 2 */
+ MALI_PMM_CORE_PP3 = 0x00000020, /**< Mali 200 pixel processor 3 */
+ MALI_PMM_CORE_PP_ALL = 0x0000003C /**< Mali 200 pixel processors 0-3 */
+} mali_pmm_core_id;
+
+/** @brief PMM bitmask of mali_pmm_core_ids
+ */
+typedef u32 mali_pmm_core_mask;
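+
+/* For illustration only (not part of the original driver): a mask covering the
+ * geometry processor and the first pixel processor would be built as
+ *
+ *   mali_pmm_core_mask mask = MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0;
+ */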
+
+/** @brief PMM event timestamp type
+ */
+typedef u32 mali_pmm_timestamp;
+
+/** @brief power management event message struct
+ */
+typedef struct _mali_pmm_message
+{
+ mali_pmm_event_id id; /**< event id */
+ mali_pmm_message_data data; /**< specific data associated with the event */
+ mali_pmm_timestamp ts; /**< timestamp the event was placed in the event queue */
+} mali_pmm_message_t;
+
+
+
+/** @brief the state of the power management module.
+ */
+/* These must match up with the pmm_trace_state array */
+typedef enum mali_pmm_state_tag
+{
+ MALI_PMM_STATE_UNAVAILABLE = 0, /**< PMM is not available */
+ MALI_PMM_STATE_SYSTEM_ON = 1, /**< All of the Mali hardware is on */
+ MALI_PMM_STATE_SYSTEM_OFF = 2, /**< All of the Mali hardware is off */
+ MALI_PMM_STATE_SYSTEM_TRANSITION = 3 /**< System is changing state */
+} mali_pmm_state;
+
+
+/** @brief a power management policy.
+ */
+/* These must match up with the pmm_trace_policy array */
+typedef enum mali_pmm_policy_tag
+{
+ MALI_PMM_POLICY_NONE = 0, /**< No policy */
+ MALI_PMM_POLICY_ALWAYS_ON = 1, /**< Always on policy */
+ MALI_PMM_POLICY_JOB_CONTROL = 2, /**< Job control policy */
+ MALI_PMM_POLICY_RUNTIME_JOB_CONTROL = 3 /**< Run time power management control policy */
+} mali_pmm_policy;
+
+/** @brief Function to report to the OS when the power down has finished
+ *
+ * @param data The event message data that initiated the power down
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data);
+
+/** @brief Function to report to the OS when the power up has finished
+ *
+ * @param data The event message data that initiated the power up
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data);
+
+/** @brief Function to report that a DVFS operation is done
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data);
+
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to notify power management events
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id event_id);
+
+#endif
+
+/** @brief Function to report to the OS that the device is idle
+ *
+ * @note informs the OS that the device is idle
+ */
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle( void );
+
+/** @brief Function to request that the OS activate the device
+ *
+ * @note informs the OS that the device needs to be activated
+ */
+void _mali_osk_pmm_dev_activate( void );
+
+/** @brief Queries the current state of the PMM software
+ *
+ * @note the state of the PMM can change after this call has returned
+ *
+ * @return the current PMM state value
+ */
+mali_pmm_state _mali_pmm_state( void );
+
+/** @brief List of cores that are registered with the PMM
+ *
+ * This will return the cores that are currently registered with the PMM,
+ * as a bitwise OR of the mali_pmm_core_id values. A value of 0x0 means that
+ * there are no cores registered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that have been registered with the PMM
+ */
+mali_pmm_core_mask _mali_pmm_cores_list( void );
+
+/** @brief List of cores that are powered up in the PMM
+ *
+ * This will return the subset of the cores listed by _mali_pmm_cores_list()
+ * that have power, as a bitwise OR of the mali_pmm_core_id values. A value of
+ * 0x0 means that none of the registered cores are powered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that are powered up
+ */
+mali_pmm_core_mask _mali_pmm_cores_powered( void );
+
+
+/** @brief List of power management policies that are supported by the PMM
+ *
+ * Given an empty array of policies - policy_list - with room for the number of
+ * entries specified by policy_list_size, this function will populate the list
+ * with the available policies. If the policy_list is too small for all the
+ * policies then only policy_list_size entries will be returned. If the
+ * policy_list is bigger than the number of available policies then the extra
+ * entries will be set to MALI_PMM_POLICY_NONE.
+ * The function will also update policies_available with the number of policies
+ * that are available, even if it exceeds policy_list_size.
+ * The function will succeed if all policies could be returned, and will fail
+ * if none or only a subset of the policies could be returned.
+ * The function will also fail if no policy_list is supplied, though
+ * policies_available is optional.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy_list_size is the number of policies that can be returned in
+ * the policy_list argument
+ * @param policy_list is an array of policies that should be populated with
+ * the list of policies that are supported by the PMM
+ * @param policies_available optional argument, if non-NULL will be set to the
+ * number of policies available
+ * @return _MALI_OSK_ERR_OK if the policies could be listed, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available );
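+
+/* Illustrative call of the function above - a sketch only, not part of the
+ * original driver (note that the current implementation is a stub returning
+ * _MALI_OSK_ERR_UNSUPPORTED):
+ *
+ *   mali_pmm_policy list[4];
+ *   u32 available = 0;
+ *
+ *   if( _MALI_OSK_ERR_OK == _mali_pmm_list_policies( 4, list, &available ) )
+ *   {
+ *       ... list[] holds up to 4 supported policies, available the full count ...
+ *   }
+ */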
+
+/** @brief Set the power management policy in the PMM
+ *
+ * Given a valid supported policy, this function will change the PMM to use
+ * this new policy.
+ * The function will fail if the policy given is invalid or unsupported.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy the new policy to be set
+ * @return _MALI_OSK_ERR_OK if the policy could be set, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy );
+
+/** @brief Get the current power management policy in the PMM
+ *
+ * Given a pointer to a policy data type, this function will return the current
+ * policy that is in effect for the PMM. This may be out of date if there is a
+ * pending set policy call that has not been serviced.
+ * The function will fail if the policy given is NULL.
+ *
+ * @note the policy of the PMM can change after this call has returned
+ *
+ * @param policy a pointer to a policy that can be updated to the current
+ * policy
+ * @return _MALI_OSK_ERR_OK if the policy could be returned, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy );
+
+#if MALI_PMM_TRACE
+
+/** @brief Indicates when a hardware state change occurs in the PMM
+ *
+ * @param old a mask of the cores indicating the previous state of the cores
+ * @param newstate a mask of the cores indicating the new current state of the cores
+ */
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate );
+
+/** @brief Indicates when a state change occurs in the PMM
+ *
+ * @param old the previous state for the PMM
+ * @param newstate the new current state of the PMM
+ */
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate );
+
+/** @brief Indicates when a policy change occurs in the PMM
+ *
+ * @param old the previous policy for the PMM
+ * @param newpolicy the new current policy of the PMM
+ */
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy );
+
+/** @brief Records when an event message is read by the event system
+ *
+ * @param event the message details
+ * @param received MALI_TRUE when the message is received by the PMM, else it is being sent
+ */
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received );
+
+#endif /* MALI_PMM_TRACE */
+
+/** @brief Dumps the current state of the OS PMM thread
+ */
+#if MALI_STATE_TRACKING
+void mali_pmm_dump_os_thread_state( void );
+#endif /* MALI_STATE_TRACKING */
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+/** @brief Dumps the current state of the PMM
+ */
+void malipmm_state_dump( void );
+#endif
+
+/** @} */ /* end group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c
new file mode 100644
index 00000000000..327e8b4c853
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.c
+ * Implementation of the common routines for power management module
+ * policies
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+
+#include "mali_pmm_policy_alwayson.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+/* Call back function for timer expiration */
+static void pmm_policy_timer_callback( void *arg );
+
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pptimer, 0, sizeof(*pptimer));
+
+ pptimer->timer = _mali_osk_timer_init();
+ if( pptimer->timer )
+ {
+ _mali_osk_timer_setcallback( pptimer->timer, pmm_policy_timer_callback, (void *)pptimer );
+ pptimer->timeout = timeout;
+ pptimer->event_id = id;
+ MALI_SUCCESS;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
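+/* Note: the callback below only flags the expiry and schedules the PMM policy
+ * check; the actual PMM event is raised later from pmm_policy_timer_raise_event()
+ * once the policy check runs.
+ */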
+static void pmm_policy_timer_callback( void *arg )
+{
+ _pmm_policy_timer_t *pptimer = (_pmm_policy_timer_t *)arg;
+
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT( pptimer->set );
+
+ /* Set timer expired and flag there is a policy to check */
+ pptimer->expired = MALI_TRUE;
+ malipmm_set_policy_check();
+}
+
+
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ _mali_osk_timer_del( pptimer->timer );
+ _mali_osk_timer_term( pptimer->timer );
+ pptimer->timer = NULL;
+}
+
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+ if( !(pptimer->set) )
+ {
+ pptimer->set = MALI_TRUE;
+ pptimer->expired = MALI_FALSE;
+ pptimer->start = _mali_osk_time_tickcount();
+ _mali_osk_timer_add( pptimer->timer, pptimer->timeout );
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+ if( pptimer->set )
+ {
+ _mali_osk_timer_del( pptimer->timer );
+ pptimer->set = MALI_FALSE;
+ pptimer->expired = MALI_FALSE;
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ if( pptimer->expired )
+ {
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_TIMEOUT, /* Assume timeout id, but set it below */
+ 0 };
+
+ event.id = pptimer->event_id;
+ event.data = (mali_pmm_message_data)pptimer->start;
+
+ /* Don't need to do any other notification with this timer */
+ pptimer->expired = MALI_FALSE;
+ /* Unset timer so it is free to be set again */
+ pptimer->set = MALI_FALSE;
+
+ _mali_ukk_pmm_event_message( &event );
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start )
+{
+ return (_mali_osk_time_after( other_start, timer_start ) == 0);
+}
+
+
+_mali_osk_errcode_t pmm_policy_init(_mali_pmm_internal_state_t *pmm)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ err = pmm_policy_init_always_on();
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ err = pmm_policy_init_job_control(pmm);
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ err = _MALI_OSK_ERR_FAULT;
+ }
+
+ return err;
+}
+
+void pmm_policy_term(_mali_pmm_internal_state_t *pmm)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ pmm_policy_term_always_on();
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ pmm_policy_term_job_control();
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ MALI_PRINT_ERROR( ("PMM: Invalid policy terminated %d\n", pmm->policy) );
+ }
+}
+
+
+_mali_osk_errcode_t pmm_policy_process(_mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ err = pmm_policy_process_always_on( pmm, event );
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ err = pmm_policy_process_job_control( pmm, event );
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ err = _MALI_OSK_ERR_FAULT;
+ }
+
+ return err;
+}
+
+
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ pmm_policy_check_job_control();
+ }
+ break;
+
+ default:
+ /* Nothing needs to be done */
+ break;
+ }
+}
+
+
+#endif /* USING_MALI_PMM */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h
new file mode 100644
index 00000000000..83cb7f29a92
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.h
+ * Defines the power management module policies
+ */
+
+#ifndef __MALI_PMM_POLICY_H__
+#define __MALI_PMM_POLICY_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Generic timer for use with policies
+ */
+typedef struct _pmm_policy_timer
+{
+ u32 timeout; /**< Timeout for this timer in ticks */
+ mali_pmm_event_id event_id; /**< Event id that will be raised when timer expires */
+ _mali_osk_timer_t *timer; /**< Timer */
+ mali_bool set; /**< Timer set */
+ mali_bool expired; /**< Timer expired - event needs to be raised */
+ u32 start; /**< Timer start ticks */
+} _pmm_policy_timer_t;
+
+/** @brief Policy timer initialization
+ *
+ * This will create a timer for use in policies, but won't start it
+ *
+ * @param pptimer An empty timer structure to be initialized
+ * @param timeout Timeout in ticks for the timer
+ * @param id Event id that will be raised on timeout
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id );
+
+/** @brief Policy timer termination
+ *
+ * This will clean up a timer that was previously used in policies; it
+ * will also stop the timer if it is currently running
+ *
+ * @param pptimer An initialized timer structure to be terminated
+ */
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer start
+ *
+ * This will start a previously created timer for use in policies.
+ * When the timer expires after the initialized timeout it will raise
+ * a PMM event with the event id given at initialization, passing the
+ * timer's start time as the event data.
+ *
+ * @param pptimer A previously initialized policy timer
+ * @return MALI_TRUE if the timer was started, MALI_FALSE if it is already started
+ */
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer stop
+ *
+ * This will stop a previously created timer for use in policies
+ *
+ * @param pptimer A previously started policy timer
+ * @return MALI_TRUE if the timer was stopped, MALI_FALSE if it is already stopped
+ */
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer event raiser
+ *
+ * This raises an event for an expired timer
+ *
+ * @param pptimer An expired policy timer
+ * @return MALI_TRUE if an event was raised, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer valid checker
+ *
+ * This will check that a timer was started after a given time
+ *
+ * @param timer_start Time the timer was started
+ * @param other_start Time when another event or action occurred
+ * @return MALI_TRUE if the timer was started after the other time, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start );
+
+
+/** @brief Common policy initialization
+ *
+ * This will initialize the current policy
+ *
+ * @note Any previously initialized policy should be terminated first
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy termination
+ *
+ * This will terminate the current policy.
+ * @note This can be called when a policy has not been initialized
+ */
+void pmm_policy_term( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy state changer
+ *
+ * Given the next available event message, this routine passes it to
+ * the current policy for processing
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+
+/** @brief Common policy checker
+ *
+ * If a policy timer fires then this function will be called to
+ * allow the policy to take the correct action
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm );
+
+/** @} */ /* End group pmmapi_policy */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_H__ */
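A minimal usage sketch of the timer API declared above, not part of the patch. It assumes the timeout is already expressed in OSK ticks and that the timer's expired flag is set elsewhere by the OSK timer callback, as the implementation in mali_pmm_policy.c does.

/* Sketch only - hypothetical policy-private timer, for illustration. */
static _pmm_policy_timer_t example_timer;

static _mali_osk_errcode_t example_timer_usage( u32 timeout_ticks )
{
	_mali_osk_errcode_t err;

	/* Create the timer; it will raise MALI_PMM_EVENT_TIMEOUT when it expires */
	err = pmm_policy_timer_init( &example_timer, timeout_ticks, MALI_PMM_EVENT_TIMEOUT );
	if( err != _MALI_OSK_ERR_OK ) return err;

	/* Arm it - returns MALI_FALSE if it was already running */
	(void)pmm_policy_timer_start( &example_timer );

	/* ... later, after expiry has been flagged by the timer callback ... */
	if( pmm_policy_timer_raise_event( &example_timer ) )
	{
		/* A MALI_PMM_EVENT_TIMEOUT message was queued, with the start time as data */
	}

	/* Stop the timer (if still set) and clean up */
	(void)pmm_policy_timer_stop( &example_timer );
	pmm_policy_timer_term( &example_timer );
	return _MALI_OSK_ERR_OK;
}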
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c
new file mode 100644
index 00000000000..643bb04553b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.c
+ * Implementation of the power management module policy - always on
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_alwayson.h"
+
+_mali_osk_errcode_t pmm_policy_init_always_on(void)
+{
+ /* Nothing to set up */
+ MALI_SUCCESS;
+}
+
+void pmm_policy_term_always_on(void)
+{
+ /* Nothing to tear down */
+}
+
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* We aren't going to do anything, but signal so we don't block the OS
+ * NOTE: This may adversely affect any jobs Mali is currently running
+ */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Nothing to do */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ /* Nothing to do - we are always on */
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ default:
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h
new file mode 100644
index 00000000000..a158b09f610
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.h
+ * Defines the power management module policy for always on
+ */
+
+#ifndef __MALI_PMM_POLICY_ALWAYSON_H__
+#define __MALI_PMM_POLICY_ALWAYSON_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Always on policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_always_on(void);
+
+/** @brief Always on policy termination
+ */
+void pmm_policy_term_always_on(void);
+
+/** @brief Always on policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * The always on policy will ignore all events and keep the Mali cores
+ * powered all the time.
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_ALWAYSON_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c
new file mode 100644
index 00000000000..8450bd722f4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.c
+ * Implementation of the power management module policy - job control
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+typedef struct _pmm_policy_data_job_control
+{
+ _pmm_policy_timer_t latency; /**< Latency timeout timer for all cores */
+ u32 core_active_start; /**< Last time a core was set to active */
+ u32 timeout; /**< Timeout in ticks for latency timer */
+} _pmm_policy_data_job_control_t;
+
+
+/** @brief Local data for this policy
+ */
+static _pmm_policy_data_job_control_t *data_job_control = NULL;
+
+/* @brief Start or stop the inactivity timeout depending on whether the powered cores are all idle */
+static void job_control_timeout_setup( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Do we have an inactivity time out and some powered cores? */
+ if( pptimer->timeout > 0 && pmm->cores_powered != 0 )
+ {
+ /* Is the system idle and all the powered cores are idle? */
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->cores_idle == pmm->cores_powered )
+ {
+ if( pmm_policy_timer_start(pptimer) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Setting in-activity latency timer\n") );
+ }
+ }
+ else
+ {
+ /* We are not idle so there is no need for an inactivity timer
+ */
+ if( pmm_policy_timer_stop(pptimer) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Removing in-activity latency timer\n") );
+ }
+ }
+ }
+}
+
+/* @brief Check that the timeout is valid and that a timeout is actually set */
+static mali_bool job_control_timeout_valid( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer, u32 timer_start )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Not a valid timer! */
+ if( pptimer->timeout == 0 ) return MALI_FALSE;
+
+ /* Are some cores powered and are they all idle? */
+ if( (pmm->cores_powered != 0) && (pmm->cores_idle == pmm->cores_powered) )
+ {
+ /* Has latency timeout started after the last core was active? */
+ if( pmm_policy_timer_valid( timer_start, data_job_control->core_active_start ) )
+ {
+ return MALI_TRUE;
+ }
+ else
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - out of date\n") );
+ }
+ }
+ else
+ {
+ if( pmm->cores_powered == 0 )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - cores already off\n") );
+ }
+ else
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - cores active\n") );
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+_mali_osk_errcode_t pmm_policy_init_job_control( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER( pmm );
+ MALI_DEBUG_ASSERT( data_job_control == NULL );
+
+ data_job_control = (_pmm_policy_data_job_control_t *) _mali_osk_malloc(sizeof(*data_job_control));
+ MALI_CHECK_NON_NULL( data_job_control, _MALI_OSK_ERR_NOMEM );
+
+ data_job_control->core_active_start = _mali_osk_time_tickcount();
+ data_job_control->timeout = MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT;
+
+ err = pmm_policy_timer_init( &data_job_control->latency, data_job_control->timeout, MALI_PMM_EVENT_TIMEOUT );
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ return err;
+ }
+
+ /* Start the latency timeout */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ MALI_SUCCESS;
+}
+
+void pmm_policy_term_job_control(void)
+{
+ if( data_job_control != NULL )
+ {
+ pmm_policy_timer_term( &data_job_control->latency );
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ }
+}
+
+static void pmm_policy_job_control_job_queued( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+
+ /* Make sure that all cores are powered in this
+ * simple policy
+ */
+ cores = pmm->cores_registered;
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until finished */
+ pmm->status = MALI_PMM_STATUS_POLICY_POWER_UP;
+ }
+ }
+}
+
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process start - status=%d\n", pmm->status) );
+
+ /* For most events the data is simply the core mask */
+ cores = pmm_cores_from_event_data( pmm, event );
+
+#if MALI_STATE_TRACKING
+ pmm->mali_last_pmm_status = pmm->status;
+#endif /* MALI_STATE_TRACKING */
+
+ switch( pmm->status )
+ {
+ /**************** IDLE ****************/
+ case MALI_PMM_STATUS_IDLE:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Not expected in this state */
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+
+ /* Update idle cores to indicate they are now active - remove them from the idle mask */
+ pmm_cores_set_active( pmm, cores );
+ /* Remember when this happened */
+ data_job_control->core_active_start = event->ts;
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_SCHEDULED);
+#endif
+
+ /*** FALL THROUGH to QUEUED to check POWER UP ***/
+
+ case MALI_PMM_EVENT_JOB_QUEUED:
+
+ pmm_policy_job_control_job_queued( pmm );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_QUEUED);
+#endif
+ break;
+
+ case MALI_PMM_EVENT_DVFS_PAUSE:
+
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if ( cores_subset != 0 )
+ {
+ if ( !pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 1;
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ pmm_save_os_event_data( pmm, event->data );
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done(0);
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+
+ /* Need to power down all cores even if we need to wait for them */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering down */
+ if( !pmm_invoke_power_down( pmm ) )
+ {
+ /* We need to wait until they are idle */
+
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ /* All cores now down - respond to OS power event */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+
+ /* Update idle cores - add them back to the idle mask */
+ pmm_cores_set_idle( pmm, cores );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_FINISHED);
+#endif
+ if( data_job_control->timeout > 0 )
+ {
+ /* Wait for time out to fire */
+ break;
+ }
+ /* For job control policy - turn off all cores */
+ cores = pmm->cores_powered;
+
+ /*** FALL THROUGH to TIMEOUT TEST as NO TIMEOUT ***/
+
+ case MALI_PMM_EVENT_TIMEOUT:
+
+ /* Main job control policy - turn off cores after inactivity */
+ if( job_control_timeout_valid( pmm, &data_job_control->latency, (u32)event->data ) )
+ {
+ /* Valid inactivity timeout - so find out if we can power down
+ * immediately - if we can't then the cores are still in fact
+ * active
+ */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_TRUE );
+ if( cores_subset != 0 )
+ {
+ /* Check if we can really power down; if not then we are not
+ * really inactive
+ */
+ if( !pmm_invoke_power_down( pmm ) )
+ {
+ pmm_power_down_cancel( pmm );
+ }
+ }
+ /* else there are no cores powered up! */
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_TIMEOUT);
+#endif
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /******************DVFS PAUSE**************/
+ case MALI_PMM_STATUS_DVFS_PAUSE:
+ switch ( event->id )
+ {
+ case MALI_PMM_EVENT_DVFS_RESUME:
+
+ if ( pmm->cores_powered != 0 )
+ {
+ pmm->cores_ack_down = 0;
+ pmm_power_down_cancel( pmm );
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ else
+ {
+ pmm_policy_job_control_job_queued( pmm );
+ }
+ _mali_osk_pmm_dvfs_operation_done( 0 );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ if ( pmm->cores_powered != 0 )
+ {
+ if ( pmm_invoke_power_down( pmm ) )
+ {
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ }
+ }
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ default:
+ break;
+ }
+ break;
+
+ /**************** POWER UP ****************/
+ case MALI_PMM_STATUS_OS_POWER_UP:
+ case MALI_PMM_STATUS_POLICY_POWER_UP:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ /* Make sure the cores acknowledging power up are the ones we expect */
+ MALI_DEBUG_ASSERT( cores == pmm->cores_pend_up );
+ pmm_cores_set_up_ack( pmm, cores );
+
+ if( pmm_invoke_power_up( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /**************** POWER DOWN ****************/
+ case MALI_PMM_STATUS_OS_POWER_DOWN:
+ case MALI_PMM_STATUS_POLICY_POWER_DOWN:
+ switch( event->id )
+ {
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+
+ pmm_cores_set_down_ack( pmm, cores );
+
+ if ( pmm->is_dvfs_active == 1 )
+ {
+ if( pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 0;
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ break;
+ }
+
+ /* Now check if we can power down */
+ if( pmm_invoke_power_down( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ case MALI_PMM_STATUS_OS_WAITING:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until power up complete */
+ pmm->status = MALI_PMM_STATUS_OS_POWER_UP;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ /* All cores now up - respond to OS power up event */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ default:
+ /* All other messages are ignored in this state */
+ break;
+ }
+ break;
+
+ default:
+ /* Unexpected state */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Set in-activity latency timer - if required */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ /* Update the PMM state */
+ pmm_update_system_state( pmm );
+#if MALI_STATE_TRACKING
+ pmm->mali_new_event_status = event->id;
+#endif /* MALI_STATE_TRACKING */
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process end - status=%d and event=%d\n", pmm->status,event->id) );
+
+ MALI_SUCCESS;
+}
+
+void pmm_policy_check_job_control(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ /* Latency timer must have expired - raise the event */
+ pmm_policy_timer_raise_event(&data_job_control->latency);
+}
+
+
+#endif /* USING_MALI_PMM */
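To make the latency-timeout path easier to follow: when the OSK timer armed by job_control_timeout_setup() fires, the expiry is not handled in timer context. A hypothetical callback along the following lines (the real one lives in mali_pmm_policy.c, outside this hunk) just marks the timer expired and asks for a policy check; the MALI_PMM_EVENT_TIMEOUT case in pmm_policy_process_job_control() above then decides whether to power down. This is a sketch under those assumptions, not the patch's own callback.

/* Sketch only - illustrative timer callback; the argument wiring is assumed. */
static void example_latency_timer_callback( void *arg /* hypothetical */ )
{
	_pmm_policy_timer_t *pptimer = (_pmm_policy_timer_t *)arg;

	/* Record that the timer fired; the event is raised later, outside timer context */
	pptimer->expired = MALI_TRUE;

	/* Ask the PMM to call pmm_policy_check_policy() -> pmm_policy_check_job_control(),
	 * which calls pmm_policy_timer_raise_event() and queues MALI_PMM_EVENT_TIMEOUT.
	 */
	malipmm_set_policy_check();
}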
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h
new file mode 100644
index 00000000000..455234b80bc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.h
+ * Defines the power management module job control policy
+ */
+
+#ifndef __MALI_PMM_POLICY_JOBCONTROL_H__
+#define __MALI_PMM_POLICY_JOBCONTROL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief The jobcontrol policy inactivity latency timeout (in ticks)
+ * before the hardware is switched off
+ *
+ * @note Setting this low whilst tracing or producing debug output can
+ * cause a lot of timeouts to fire, which can affect the PMM behaviour
+ */
+#define MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT 50
+
+/** @brief Job control policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_job_control(_mali_pmm_internal_state_t *pmm);
+
+/** @brief Job control policy termination
+ */
+void pmm_policy_term_job_control(void);
+
+/** @brief Job control policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Job control policy depends on events from the Mali cores, and will
+ * power down all cores after an inactivity latency timeout. It will
+ * power the cores back on again when a job is scheduled to run.
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Job control policy checker
+ *
+ * The latency timer has fired and we need to raise the correct event to
+ * handle it
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_job_control(void);
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_JOBCONTROL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c
new file mode 100644
index 00000000000..a0ac1c7790c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_state.c
+ * Implementation of the power management module internal state
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_system.h"
+
+#include "mali_kernel_core.h"
+#include "mali_platform.h"
+
+#define SIZEOF_CORES_LIST 6
+
+/* NOTE: L2 *MUST* be first on the list so that it
+ * is correctly powered on first and powered off last
+ */
+static mali_pmm_core_id cores_list[] = { MALI_PMM_CORE_L2,
+ MALI_PMM_CORE_GP,
+ MALI_PMM_CORE_PP0,
+ MALI_PMM_CORE_PP1,
+ MALI_PMM_CORE_PP2,
+ MALI_PMM_CORE_PP3 };
+
+
+
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_state state;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm->cores_registered == 0 )
+ {
+ state = MALI_PMM_STATE_UNAVAILABLE;
+ }
+ else if( pmm->cores_powered == 0 )
+ {
+ state = MALI_PMM_STATE_SYSTEM_OFF;
+ }
+ else if( pmm->cores_powered == pmm->cores_registered )
+ {
+ state = MALI_PMM_STATE_SYSTEM_ON;
+ }
+ else
+ {
+ /* Some other state where not everything is on or off */
+ state = MALI_PMM_STATE_SYSTEM_TRANSITION;
+ }
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_state_change( pmm->state, state );
+#endif
+ pmm->state = state;
+}
+
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* All cores - the system */
+ cores = pmm->cores_registered;
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* Currently the main event data is only the cores
+ * for these messages
+ */
+ cores = (mali_pmm_core_mask)event->data;
+ if( cores == MALI_PMM_CORE_SYSTEM )
+ {
+ cores = pmm->cores_registered;
+ }
+ else if( cores == MALI_PMM_CORE_PP_ALL )
+ {
+ /* Get the subset of registered PP cores */
+ cores = (pmm->cores_registered & MALI_PMM_CORE_PP_ALL);
+ }
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+ break;
+
+ default:
+ /* Assume timeout messages - report cores still powered */
+ cores = pmm->cores_powered;
+ break;
+ }
+
+ return cores;
+}
+
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check that cores aren't pending power down when asked for power up */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+ cores_subset = (~(pmm->cores_powered) & cores);
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ pmm->cores_pend_up = cores_subset;
+ }
+
+ return cores_subset;
+}
+
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only )
+{
+ mali_pmm_core_mask cores_subset;
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check that cores aren't pending power up when asked for power down */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+ cores_subset = (pmm->cores_powered & cores);
+ if( cores_subset != 0 )
+ {
+ int n;
+ volatile mali_pmm_core_mask *ppowered = &(pmm->cores_powered);
+
+ /* There are some cores that need powering down, but we may
+ * need to wait until they are idle
+ */
+ for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+ {
+ if( (cores_list[n] & cores_subset) != 0 )
+ {
+ /* Core is to be powered down */
+ pmm->cores_pend_down |= cores_list[n];
+
+ /* We can't hold the power lock while accessing the subsystem mutex
+ * via the core power call, because driver termination takes the
+ * subsystem mutex and then the power lock to unregister a core.
+ * This does mean that the following call could fail if the core is
+ * unregistered before we tell it to power down, but that does not
+ * matter as we are terminating.
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* Signal the core to power down.
+ * If it is busy (not idle) it will set a pending power down flag
+ * (as long as we aren't asking for an immediate power down only).
+ * If it isn't busy it will move out of the idle queue right
+ * away.
+ */
+ err = mali_core_signal_power_down( cores_list[n], immediate_only );
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Re-read cores_subset in case it has changed */
+ cores_subset = (*ppowered & cores);
+
+ if( err == _MALI_OSK_ERR_OK )
+ {
+ /* We moved an idle core to the power down queue
+ * which means it is now acknowledged (if it is still
+ * registered)
+ */
+ pmm->cores_ack_down |= (cores_list[n] & cores_subset);
+ }
+ else
+ {
+ MALI_DEBUG_ASSERT( err == _MALI_OSK_ERR_BUSY ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*ppowered & cores_list[n]) == 0) );
+ /* If we didn't move a core it must be active, so leave
+ * it pending and we will get an acknowledgement (when
+ * not in immediate-only mode).
+ * Alternatively we are shutting down and the core has
+ * been unregistered.
+ */
+ }
+ }
+ }
+ }
+
+ return cores_subset;
+}
+
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm )
+{
+ int n;
+ mali_pmm_core_mask pd, ad;
+ _mali_osk_errcode_t err;
+ volatile mali_pmm_core_mask *pregistered;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Cancelling power down\n") );
+
+ pd = pmm->cores_pend_down;
+ ad = pmm->cores_ack_down;
+ /* Clear the pending cores so that they don't move to the off
+ * queue if they haven't already
+ */
+ pmm->cores_pend_down = 0;
+ pmm->cores_ack_down = 0;
+ pregistered = &(pmm->cores_registered);
+
+ /* Power up all the pending power down cores - just so
+ * we make sure the system is in a known state, as a
+ * pending core might have sent an acknowledgement message
+ * which hasn't been read yet.
+ */
+ for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+ {
+ if( (cores_list[n] & pd) != 0 )
+ {
+ /* We can't hold the power lock while accessing the subsystem mutex
+ * via the core power call, because driver termination takes the
+ * subsystem mutex and then the power lock to unregister a core.
+ * This does mean that the following power up call could fail if the
+ * core is unregistered before we tell it to power up, but that does
+ * not matter as we are terminating.
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* As we are cancelling - only move the cores back to the queue -
+ * no reset needed
+ */
+ err = mali_core_signal_power_up( cores_list[n], MALI_TRUE );
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* Update pending list with the current registered cores */
+ pd &= (*pregistered);
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_BUSY &&
+ ((cores_list[n] & ad) == 0)) ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*pregistered & cores_list[n]) == 0) );
+ /* If we didn't power up a core it must be active and
+ * hasn't actually tried to power down - this is expected
+ * for cores that haven't acknowledged.
+ * Alternatively we are shutting down and the core has
+ * been unregistered.
+ */
+ }
+ }
+ }
+ /* Only used in debug builds */
+ MALI_IGNORE(ad);
+}
+
+
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return ( pmm->cores_pend_down == pmm->cores_ack_down ? MALI_TRUE : MALI_FALSE );
+}
+
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ /* Check that cores are pending power down during power down invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down != 0 );
+ /* Check that cores are not pending power up during power down invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+ if( !pmm_power_down_okay( pmm ) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: Waiting for cores to go idle for power off - 0x%08x / 0x%08x\n",
+ pmm->cores_pend_down, pmm->cores_ack_down) );
+ return MALI_FALSE;
+ }
+ else
+ {
+#if !MALI_PMM_NO_PMU
+ err = mali_platform_powerdown( pmm->cores_pend_down );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+
+ if( err == _MALI_OSK_ERR_OK )
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Remove powered down cores from idle and powered list */
+ pmm->cores_powered &= ~(pmm->cores_pend_down);
+ pmm->cores_idle &= ~(pmm->cores_pend_down);
+ /* Reset pending/acknowledged status */
+ pmm->cores_pend_down = 0;
+ pmm->cores_ack_down = 0;
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power down cores - (0x%x) %s",
+ pmm->cores_pend_down, pmm_trace_get_core_name(pmm->cores_pend_down)) );
+ pmm->fatal_power_err = MALI_TRUE;
+ }
+ }
+
+ return MALI_TRUE;
+}
+
+
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return ( pmm->cores_pend_up == pmm->cores_ack_up ? MALI_TRUE : MALI_FALSE );
+}
+
+
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ /* Check that cores are pending power up during power up invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up != 0 );
+ /* Check that cores are not pending power down during power up invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+ if( pmm_power_up_okay( pmm ) )
+ {
+ /* Power up has completed - sort out subsystem core status */
+
+ int n;
+ /* Use volatile to access, so that it is updated if any cores are unregistered */
+ volatile mali_pmm_core_mask *ppendup = &(pmm->cores_pend_up);
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Move cores into idle queues */
+ for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+ {
+ if( (cores_list[n] & (*ppendup)) != 0 )
+ {
+ /* We can't hold the power lock while accessing the subsystem mutex
+ * via the core power call, because driver termination takes the
+ * subsystem mutex and then the power lock to unregister a core.
+ * This does mean that the following call could fail if the core is
+ * unregistered before we tell it to power up, but that does not
+ * matter as we are terminating.
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_FAULT &&
+ (*ppendup & cores_list[n]) == 0) );
+ /* We only expect this to fail when we are shutting down
+ * and the core has been unregistered
+ */
+ }
+ }
+ }
+ /* Finished power up - add cores to idle and powered list */
+ pmm->cores_powered |= (*ppendup);
+ pmm->cores_idle |= (*ppendup);
+ /* Reset pending/acknowledge status */
+ pmm->cores_pend_up = 0;
+ pmm->cores_ack_up = 0;
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ return MALI_TRUE;
+ }
+ else
+ {
+#if !MALI_PMM_NO_PMU
+ /* Power up must now be done */
+ err = mali_platform_powerup( pmm->cores_pend_up );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power up cores - (0x%x) %s",
+ pmm->cores_pend_up, pmm_trace_get_core_name(pmm->cores_pend_up)) );
+ pmm->fatal_power_err = MALI_TRUE;
+ }
+ else
+ {
+ /* TBD - Update core status immediately rather than use event message */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK,
+ 0 };
+ /* All the cores that were pending power up, have now completed power up */
+ event.data = pmm->cores_pend_up;
+ _mali_ukk_pmm_event_message( &event );
+ MALIPMM_DEBUG_PRINT( ("PMM: Sending ACK to power up") );
+ }
+ }
+
+ /* Always return false, as we need an interrupt to acknowledge
+ * when power up is complete
+ */
+ return MALI_FALSE;
+}
+
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ pmm->cores_idle &= (~cores);
+ return pmm->cores_idle;
+}
+
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ pmm->cores_idle |= (cores);
+ return pmm->cores_idle;
+}
+
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check core is pending a power down */
+ MALI_DEBUG_ASSERT( (pmm->cores_pend_down & cores) != 0 );
+ /* Check core has not acknowledged power down more than once */
+ MALI_DEBUG_ASSERT( (pmm->cores_ack_down & cores) == 0 );
+
+ pmm->cores_ack_down |= (cores);
+
+ return pmm->cores_ack_down;
+}
+
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_notification_t *msg = NULL;
+ mali_pmm_status status;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALIPMM_DEBUG_PRINT( ("PMM: Fatal Reset called") );
+
+ MALI_DEBUG_ASSERT( pmm->status != MALI_PMM_STATUS_OFF );
+
+ /* Reset the common status */
+ pmm->waiting = 0;
+ pmm->missed = 0;
+ pmm->fatal_power_err = MALI_FALSE;
+ pmm->no_events = 0;
+ pmm->check_policy = MALI_FALSE;
+ pmm->cores_pend_down = 0;
+ pmm->cores_pend_up = 0;
+ pmm->cores_ack_down = 0;
+ pmm->cores_ack_up = 0;
+ pmm->is_dvfs_active = 0;
+#if MALI_PMM_TRACE
+ pmm->messages_sent = 0;
+ pmm->messages_received = 0;
+ pmm->imessages_sent = 0;
+ pmm->imessages_received = 0;
+ MALI_PRINT( ("PMM Trace: *** Fatal reset occurred ***") );
+#endif
+
+ /* Set that we are unavailable whilst resetting */
+ pmm->state = MALI_PMM_STATE_UNAVAILABLE;
+ status = pmm->status;
+ pmm->status = MALI_PMM_STATUS_OFF;
+
+ /* We want all cores powered */
+ pmm->cores_powered = pmm->cores_registered;
+ /* The cores may not be idle, but this state will be rectified later */
+ pmm->cores_idle = pmm->cores_registered;
+
+ /* So power on any cores that are registered */
+ if( pmm->cores_registered != 0 )
+ {
+ int n;
+ volatile mali_pmm_core_mask *pregistered = &(pmm->cores_registered);
+#if !MALI_PMM_NO_PMU
+ err = mali_platform_powerup( pmm->cores_registered );
+#endif
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ /* This is very bad as we can't even be certain the cores are now
+ * powered up
+ */
+ MALI_PRINT_ERROR( ("PMM: Failed to perform PMM reset!\n") );
+ /* TBD driver exit? */
+ }
+
+ for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+ {
+ if( (cores_list[n] & (*pregistered)) != 0 )
+ {
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* Core is now active - so try putting it in the idle queue */
+ err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* We either succeeded, or we were not off anyway, or we have
+ * just been unregistered
+ */
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_OK) ||
+ (err == _MALI_OSK_ERR_BUSY) ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*pregistered & cores_list[n]) == 0) );
+ }
+ }
+ }
+
+ /* Unblock any pending OS event */
+ if( status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ if( status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down
+ * NOTE: We are not powered down at this point due to power problems,
+ * so we are lying to the system, but something bad has already
+ * happened and we are trying to unstick things
+ * TBD - Add busy loop to power down cores?
+ */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+
+ /* Purge the event queues - drain and delete any queued messages */
+ while( _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg ) == _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_notification_delete( msg );
+ }
+
+ while( _mali_osk_notification_queue_dequeue( pmm->queue, &msg ) == _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_notification_delete( msg );
+ }
+
+ /* Return status/state to normal */
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ pmm_update_system_state(pmm);
+}
+
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check core is pending a power up */
+ MALI_DEBUG_ASSERT( (pmm->cores_pend_up & cores) != 0 );
+ /* Check core has not acknowledged power up more than once */
+ MALI_DEBUG_ASSERT( (pmm->cores_ack_up & cores) == 0 );
+
+ pmm->cores_ack_up |= (cores);
+
+ return pmm->cores_ack_up;
+}
+
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ /* Check that there is no saved data */
+ MALI_DEBUG_ASSERT( pmm->os_data == 0 );
+ /* Can't store zero data - as retrieve check will fail */
+ MALI_DEBUG_ASSERT( data != 0 );
+
+ pmm->os_data = data;
+}
+
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm)
+{
+ mali_pmm_message_data data;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ /* Check that there is saved data */
+ MALI_DEBUG_ASSERT( pmm->os_data != 0 );
+
+ /* Get data, and clear the saved version */
+ data = pmm->os_data;
+ pmm->os_data = 0;
+
+ return data;
+}
+
+/* Create a list of core names to look up.
+ * We do it this way to avoid the need for string allocation
+ * or extra stack space, so we use constant strings instead.
+ */
+typedef struct pmm_trace_corelist
+{
+ mali_pmm_core_mask id;
+ const char *name;
+} pmm_trace_corelist_t;
+
+static pmm_trace_corelist_t pmm_trace_cores[] = {
+ { MALI_PMM_CORE_SYSTEM, "SYSTEM" },
+ { MALI_PMM_CORE_GP, "GP" },
+ { MALI_PMM_CORE_L2, "L2" },
+ { MALI_PMM_CORE_PP0, "PP0" },
+ { MALI_PMM_CORE_PP1, "PP1" },
+ { MALI_PMM_CORE_PP2, "PP2" },
+ { MALI_PMM_CORE_PP3, "PP3" },
+ { MALI_PMM_CORE_PP_ALL, "PP (all)" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0),
+ "GP+L2+PP0" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0),
+ "GP+PP0" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+ "GP+L2+PP0+PP1" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+ "GP+PP0+PP1" },
+ { 0, NULL } /* Terminator of list */
+};
+
+const char *pmm_trace_get_core_name( mali_pmm_core_mask cores )
+{
+ const char *dname = NULL;
+ int cl;
+
+ /* Look up name in corelist */
+ cl = 0;
+ while( pmm_trace_cores[cl].name != NULL )
+ {
+ if( pmm_trace_cores[cl].id == cores )
+ {
+ dname = pmm_trace_cores[cl].name;
+ break;
+ }
+ cl++;
+ }
+
+ if( dname == NULL )
+ {
+ /* We don't know a good short-hand for the configuration */
+ dname = "[multi-core]";
+ }
+
+ return dname;
+}
+
+#endif /* USING_MALI_PMM */
+
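A small illustration of the debug helper above, sketch only: the lookup matches exact mask combinations in pmm_trace_cores and falls back to a generic label for anything else. The function name example_trace_names is hypothetical.

/* Sketch only - example of the debug name lookup. */
static void example_trace_names( void )
{
	/* Exact match in the table - prints "GP+L2+PP0" */
	MALIPMM_DEBUG_PRINT( ("Cores: %s\n",
		pmm_trace_get_core_name( MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0 )) );

	/* Unlisted combination falls back to "[multi-core]" */
	MALIPMM_DEBUG_PRINT( ("Cores: %s\n",
		pmm_trace_get_core_name( MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP1 )) );
}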
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h
new file mode 100644
index 00000000000..3c8c31f066d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_state.h
+ * Defines the internal power management module state
+ */
+
+#ifndef __MALI_PMM_STATE_H__
+#define __MALI_PMM_STATE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_state Power Management Module State
+ *
+ * @{
+ */
+
+/* Check that the subset is really a subset of cores */
+#define MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( cores, subset ) \
+ MALI_DEBUG_ASSERT( ((~(cores)) & (subset)) == 0 )
+
+
+/* Locking macros */
+#define MALI_PMM_LOCK(pmm) \
+ _mali_osk_lock_wait( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_UNLOCK(pmm) \
+ _mali_osk_lock_signal( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_LOCK_TERM(pmm) \
+ _mali_osk_lock_term( pmm->lock )
+
+/* Notification type for messages */
+#define MALI_PMM_NOTIFICATION_TYPE 0
+
+/** @brief Status of the PMM state machine
+ */
+typedef enum mali_pmm_status_tag
+{
+	MALI_PMM_STATUS_IDLE,                   /**< PMM is waiting for the next event */
+	MALI_PMM_STATUS_POLICY_POWER_DOWN,      /**< Policy initiated power down */
+	MALI_PMM_STATUS_POLICY_POWER_UP,        /**< Policy initiated power up */
+ MALI_PMM_STATUS_OS_WAITING, /**< PMM is waiting for OS power up */
+ MALI_PMM_STATUS_OS_POWER_DOWN, /**< OS initiated power down */
+ MALI_PMM_STATUS_RUNTIME_IDLE_IN_PROGRESS,
+ MALI_PMM_STATUS_DVFS_PAUSE, /**< PMM DVFS Status Pause */
+ MALI_PMM_STATUS_OS_POWER_UP, /**< OS initiated power up */
+ MALI_PMM_STATUS_OFF, /**< PMM is not active */
+} mali_pmm_status;
+
+
+/** @brief Internal state of the PMM
+ */
+typedef struct _mali_pmm_internal_state
+{
+ mali_pmm_status status; /**< PMM state machine */
+ mali_pmm_policy policy; /**< PMM policy */
+ mali_bool check_policy; /**< PMM policy needs checking */
+ mali_pmm_state state; /**< PMM state */
+ mali_pmm_core_mask cores_registered; /**< Bitmask of cores registered */
+ mali_pmm_core_mask cores_powered; /**< Bitmask of cores powered up */
+ mali_pmm_core_mask cores_idle; /**< Bitmask of cores idle */
+ mali_pmm_core_mask cores_pend_down; /**< Bitmask of cores pending power down */
+ mali_pmm_core_mask cores_pend_up; /**< Bitmask of cores pending power up */
+ mali_pmm_core_mask cores_ack_down; /**< Bitmask of cores acknowledged power down */
+ mali_pmm_core_mask cores_ack_up; /**< Bitmask of cores acknowledged power up */
+
+ _mali_osk_notification_queue_t *queue; /**< PMM event queue */
+ _mali_osk_notification_queue_t *iqueue; /**< PMM internal event queue */
+ _mali_osk_irq_t *irq; /**< PMM irq handler */
+ _mali_osk_lock_t *lock; /**< PMM lock */
+
+ mali_pmm_message_data os_data; /**< OS data sent via the OS events */
+
+ mali_bool pmu_initialized; /**< PMU initialized */
+
+ _mali_osk_atomic_t messages_queued; /**< PMM event messages queued */
+ u32 waiting; /**< PMM waiting events - due to busy */
+ u32 no_events; /**< PMM called to process when no events */
+
+ u32 missed; /**< PMM missed events due to OOM */
+ mali_bool fatal_power_err; /**< PMM has had a fatal power error? */
+ u32 is_dvfs_active; /**< PMM DVFS activity */
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+ u32 mali_last_pmm_status;
+ u32 mali_new_event_status;
+ u32 mali_pmm_lock_acquired;
+#endif
+
+#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ u32 messages_sent; /**< Total event messages sent */
+ u32 messages_received; /**< Total event messages received */
+ u32 imessages_sent; /**< Total event internal messages sent */
+ u32 imessages_received; /**< Total event internal messages received */
+#endif
+} _mali_pmm_internal_state_t;
+
+/** @brief Sets that a policy needs a check before processing events
+ *
+ * A timer or similar has expired and needs to be dealt with by the policy
+ */
+void malipmm_set_policy_check(void);
+
+/** @brief Update the PMM externally viewable state depending on the current PMM internal state
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Returns the core mask from the event data - if applicable
+ *
+ * @param pmm internal PMM state
+ * @param event event message to get the core mask from
+ * @return mask of cores that is relevant to this event message
+ */
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Sort out which cores need to be powered up from the given core mask
+ *
+ * All cores that can be powered up will be put into a pending state
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered up
+ * @return mask of cores that need to be powered up; this can be 0 if all cores
+ * are powered up already
+ */
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Sort out which cores need to be powered down from the given core mask
+ *
+ * All cores that can be powered down will be put into a pending state. If they
+ * can be powered down immediately they will also be acknowledged that they can be
+ * powered down. If the immediate_only flag is set, then only those cores that
+ * can be acknowledged for power down will be put into a pending state.
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered down
+ * @param immediate_only MALI_TRUE means that only cores that can power down now will
+ * be put into a pending state
+ * @return mask of cores that need to be powered down; this can be 0 if all cores
+ * are powered down already
+ */
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only );
+
+/** @brief Cancel an invocation of power down (pmm_invoke_power_down)
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if a call to invoke power down should succeed, or fail
+ *
+ * This will report MALI_FALSE if some of the cores are still active and need
+ * to acknowledge that they are ready to power down
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power down have acknowledged they
+ * can power down, else MALI_FALSE
+ */
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power down
+ *
+ * If all the pending cores have acknowledged they can power down, this will call the
+ * PMU power down function to turn them off
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered down, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if all the pending cores to power up have done so
+ *
+ * This will report MALI_FALSE if some of the cores are still powered off
+ * and have not acknowledged that they have powered up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power up have acknowledged they
+ * are now powered up, else MALI_FALSE
+ */
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power up
+ *
+ * If all the pending cores have acknowledged they have powered up, this will
+ * make the cores start processing jobs again, else this will call the PMU
+ * power up function to turn them on, and the PMM is then expected to wait for an
+ * interrupt to acknowledge the power up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered up, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Set the cores that are now active in the system
+ *
+ * Updates which cores are active and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to active
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that are now idle in the system
+ *
+ * Updates which cores are idle and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to idle
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power down
+ *
+ * Updates which cores have acknowledged the pending power down and are now ready
+ * to be turned off
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power down
+ * @return mask of all the cores that have acknowledged the power down
+ */
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power up
+ *
+ * Updates which cores have acknowledged the pending power up and are now
+ * fully powered and ready to run jobs
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power up
+ * @return mask of all the cores that have acknowledged the power up
+ */
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+
+/** @brief Tries to reset the PMM and PMU hardware to a known state after any fatal issues
+ *
+ * This will try to make all the cores powered up and reset the PMM state
+ * to its initial state after core registration - all cores powered but not
+ * pending or active.
+ * All events in the event queues will be thrown away.
+ *
+ * @note Any pending power down will be cancelled, including an OS-requested power down
+ */
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Save the OS specific data for an OS power up/down event
+ *
+ * @param pmm internal PMM state
+ * @param data OS specific event data
+ */
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data);
+
+/** @brief Retrieve the OS specific data for an OS power up/down event
+ *
+ * This will clear the stored OS data, as well as return it.
+ *
+ * @param pmm internal PMM state
+ * @return OS specific event data that was saved previously
+ */
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm);
+
+
+/** @brief Get a human readable name for the cores in a core mask
+ *
+ * @param core the core mask
+ * @return string containing a name relating to the given core mask
+ */
+const char *pmm_trace_get_core_name( mali_pmm_core_mask core );
+
+/** @} */ /* End group pmmapi_state */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_STATE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h
new file mode 100644
index 00000000000..f5106479e33
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_system.h
+ * Defines the power management module system functions
+ */
+
+#ifndef __MALI_PMM_SYSTEM_H__
+#define __MALI_PMM_SYSTEM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_system Power Management Module System Functions
+ *
+ * @{
+ */
+
+extern struct mali_kernel_subsystem mali_subsystem_pmm;
+
+/** @brief Register a core with the PMM, which will power up
+ * the core
+ *
+ * @param core the core to register with the PMM
+ * @return error if the core cannot be powered up
+ */
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core );
+
+/** @brief Unregister a core from the PMM
+ *
+ * @param core the core to unregister from the PMM
+ */
+void malipmm_core_unregister( mali_pmm_core_id core );
+
+/** @brief Acknowledge that a power down is okay to happen
+ *
+ * A core should not be running a job, or be in the idle queue when this
+ * is called.
+ *
+ * @param core the core that can now be powered down
+ */
+void malipmm_core_power_down_okay( mali_pmm_core_id core );
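+
+/* Illustrative sketch (not part of this patch): a core subsystem would
+ * normally bracket its lifetime with the two calls above and only signal
+ * malipmm_core_power_down_okay() once its job queue is drained. The core
+ * identifier and error handling are assumptions for illustration only.
+ *
+ *   if ( _MALI_OSK_ERR_OK != malipmm_core_register( core_id ) )
+ *   {
+ *       return _MALI_OSK_ERR_FAULT;   // the core could not be powered up
+ *   }
+ *   // ... core is powered and managed by the PMM ...
+ *   malipmm_core_unregister( core_id );
+ */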
+
+/** @} */ /* End group pmmapi_system */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_SYSTEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 00000000000..e9e5e55a082
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c
new file mode 100644
index 00000000000..1c1bf306688
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c
@@ -0,0 +1,82 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
+int mali_dev_pause(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+int mali_dev_resume(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
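+
+/* Illustrative sketch (not part of this patch): a DVFS governor built against
+ * this module could pair the two exported calls around a clock change. The
+ * function below is hypothetical.
+ *
+ *   static int example_change_mali_frequency(void)
+ *   {
+ *       int err = mali_dev_pause();
+ *       if (err)
+ *           return err;          // -EPERM while the device is suspending
+ *
+ *       // ... reprogram the GPU clock here ...
+ *
+ *       return mali_dev_resume();
+ *   }
+ */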
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h
new file mode 100644
index 00000000000..5362f88cdbd
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#if USING_MALI_PMM
+int mali_dev_pause(void);
+int mali_dev_resume(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h
new file mode 100644
index 00000000000..30a6fa041db
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_IOCTL_H__
+#define __MALI_KERNEL_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_GET_SYSTEM_INFO_SIZE _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO_SIZE, _mali_uk_get_system_info_s *)
+#define MALI_IOC_GET_SYSTEM_INFO _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO, _mali_uk_get_system_info_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, _mali_uk_get_big_block_s *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, _mali_uk_free_big_block_s *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_ABORT_JOB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_ABORT_JOB, _mali_uk_pp_abort_job_s * )
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_ABORT_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_ABORT_JOB, _mali_uk_gp_abort_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
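+
+/* Illustrative sketch (not part of this patch): user space issues these
+ * commands against the Mali character device node. The device path, struct
+ * field usage and error handling below are assumptions for illustration only;
+ * the real argument layouts come from the uk types headers.
+ *
+ *   int fd = open("/dev/mali", O_RDWR);
+ *   _mali_uk_get_api_version_s args;
+ *
+ *   args.ctx = NULL;
+ *   args.version = _MALI_API_VERSION;
+ *   if (fd >= 0 && 0 == ioctl(fd, MALI_IOC_GET_API_VERSION, &args))
+ *   {
+ *       // args.version now holds the kernel-side API version and
+ *       // args.compatible tells whether it matches the caller's.
+ *   }
+ */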
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_IOCTL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
new file mode 100644
index 00000000000..a5144faae2b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
@@ -0,0 +1,565 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+
+/* the mali kernel subsystem types */
+#include "mali_kernel_subsystem.h"
+
+/* A memory subsystem always exists, so no need to conditionally include it */
+#include "mali_kernel_common.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_pm.h"
+
+/* */
+#include "mali_kernel_license.h"
+
+/* Setting this parameter will override memory settings in arch/config.h */
+char *mali_mem = "";
+module_param(mali_mem, charp, S_IRUSR | S_IWUSR | S_IWGRP | S_IROTH); /* rw-r--r-- */
+MODULE_PARM_DESC(mali_mem, "Override mali memory configuration. e.g. 32M@224M or 64M@OS_MEMORY");
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int mali_major = 0;
+module_param(mali_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(mali_major, "Device major number");
+
+int mali_benchmark = 0;
+module_param(mali_benchmark, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_benchmark, "Bypass Mali hardware when non-zero");
+
+extern int mali_hang_check_interval;
+module_param(mali_hang_check_interval, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_hang_check_interval, "Interval at which to check for progress after the hw watchdog has been triggered");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+#if defined(USING_MALI400_L2_CACHE)
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+#endif
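+
+/* Illustrative note (not part of this patch): the parameters above are
+ * normally given on the insmod/modprobe command line, for example
+ *
+ *   insmod mali.ko mali_mem=32M@224M mali_debug_level=5 mali_major=244
+ *
+ * The module file name and the example values are assumptions; only the
+ * parameter names and semantics are defined by this file.
+ */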
+
+struct mali_dev
+{
+ struct cdev cdev;
+#if MALI_LICENSE_IS_GPL
+ struct class * mali_class;
+#endif
+};
+
+static char mali_dev_name[] = "mali"; /* should be const, but some of the functions we call require non-const */
+
+/* the mali device */
+static struct mali_dev device;
+
+#if MALI_STATE_TRACKING
+static struct proc_dir_entry *proc_entry;
+static int mali_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data);
+#endif
+
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+/* Linux char file operations provided by the Mali module */
+struct file_operations mali_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+int mali_driver_init(void)
+{
+ int err;
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ err = _mali_dev_platform_register();
+ if (err)
+ {
+ return err;
+ }
+#endif
+#endif
+#endif
+ err = mali_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+ MALI_PRINT(("Failed to initialize driver (error %d)\n", err));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void mali_driver_exit(void)
+{
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ _mali_osk_pmm_dev_activate();
+#endif
+#endif
+#endif
+#endif
+ mali_kernel_destructor();
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ _mali_osk_pmm_dev_idle();
+#endif
+#endif
+#endif
+#endif
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+}
+
+/* called from _mali_osk_init */
+int initialize_kernel_device(void)
+{
+ int err;
+ dev_t dev = 0;
+ if (0 == mali_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0/*first minor*/, 1/*count*/, mali_dev_name);
+ mali_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(mali_major, 0);
+ err = register_chrdev_region(dev, 1/*count*/, mali_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&device, 0, sizeof(device));
+
+ /* initialize our char dev data */
+ cdev_init(&device.cdev, &mali_fops);
+ device.cdev.owner = THIS_MODULE;
+ device.cdev.ops = &mali_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&device.cdev, dev, 1/*count*/);
+
+ if (0 == err)
+ {
+#if MALI_STATE_TRACKING
+ proc_entry = create_proc_entry(mali_dev_name, 0444, NULL);
+ if (proc_entry != NULL)
+ {
+ proc_entry->read_proc = mali_proc_read;
+#endif
+#if MALI_LICENSE_IS_GPL
+ device.mali_class = class_create(THIS_MODULE, mali_dev_name);
+ if (IS_ERR(device.mali_class))
+ {
+ err = PTR_ERR(device.mali_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(device.mali_class, NULL, dev, NULL, mali_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&device.cdev);
+#else
+ return 0;
+#endif
+#if MALI_STATE_TRACKING
+ remove_proc_entry(mali_dev_name, NULL);
+ }
+ else
+ {
+ err = EFAULT;
+ }
+#endif
+ }
+ unregister_chrdev_region(dev, 1/*count*/);
+ }
+
+
+ return err;
+}
+
+/* called from _mali_osk_term */
+void terminate_kernel_device(void)
+{
+ dev_t dev = MKDEV(mali_major, 0);
+
+#if MALI_LICENSE_IS_GPL
+ device_destroy(device.mali_class, dev);
+ class_destroy(device.mali_class);
+#endif
+
+#if MALI_STATE_TRACKING
+ remove_proc_entry(mali_dev_name, NULL);
+#endif
+
+ /* unregister char device */
+ cdev_del(&device.cdev);
+ /* free major */
+ unregister_chrdev_region(dev, 1/*count*/);
+ return;
+}
+
+/** @note munmap handler is done by vma close handler */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ struct mali_session_data * session_data;
+ _mali_uk_mem_mmap_s args = {0, };
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start)) );
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+ args.size = vma->vm_end - vma->vm_start;
+ args.ukk_private = vma;
+
+ /* Call the common mmap handler */
+ MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
+
+ return 0;
+}
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void*)session_data;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+ if (NULL == (void *)arg)
+ {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch(cmd)
+ {
+ case MALI_IOC_GET_SYSTEM_INFO_SIZE:
+ err = get_system_info_size_wrapper(session_data, (_mali_uk_get_system_info_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_SYSTEM_INFO:
+ err = get_system_info_wrapper(session_data, (_mali_uk_get_system_info_s __user *)arg);
+ break;
+
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+#endif
+
+ case MALI_IOC_MEM_INIT:
+ err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_TERM:
+ err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK:
+ err = mem_get_big_block_wrapper(filp, (_mali_uk_get_big_block_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ err = mem_free_big_block_wrapper(session_data, (_mali_uk_free_big_block_s __user *)arg);
+ break;
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_ABORT_JOB:
+ err = pp_abort_job_wrapper(session_data, (_mali_uk_pp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_ABORT_JOB:
+ err = gp_abort_job_wrapper(session_data, (_mali_uk_gp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ };
+
+ return err;
+}
+
+#if MALI_STATE_TRACKING
+static int mali_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+
+ MALI_DEBUG_PRINT(1, ("mali_proc_read(page=%p, start=%p, off=%u, count=%d, eof=%p, data=%p)\n", page, start, off, count, eof, data));
+
+ if (off > 0)
+ {
+ return 0;
+ }
+
+ if (count < 1024)
+ {
+ return 0;
+ }
+
+ len = sprintf(page + len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += sprintf(page + len, "License: %s\n", MALI_KERNEL_LINUX_LICENSE);
+
+ /*
+ * A more elegant solution would be to gather information from all subsystems and
+ * then report it all in the /proc/mali file, but this would require a bit more work.
+ * Use MALI_PRINT for now so we get the information in the dmesg log at least.
+ */
+ _mali_kernel_core_dump_state();
+
+ return len;
+}
+#endif
+
+
+module_init(mali_driver_init);
+module_exit(mali_driver_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h
new file mode 100644
index 00000000000..969449d8ef4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t initialize_kernel_device(void);
+void terminate_kernel_device(void);
+
+void mali_osk_low_level_mem_init(void);
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c
new file mode 100644
index 00000000000..aaafc151077
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c
@@ -0,0 +1,786 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_pm.c
+ * Implementation of the Linux Power Management for Mali GPU kernel driver
+ */
+#undef CONFIG_HAS_EARLYSUSPEND /* ARM will remove .early_suspend support for r2p2_rel */
+#if USING_MALI_PMM
+#include <linux/sched.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <linux/suspend.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif /* MALI_GPU_UTILIZATION */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+#include "mali_linux_pm_testsuite.h"
+unsigned int pwr_mgmt_status_reg = 0;
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+#if MALI_STATE_TRACKING
+ int is_os_pmm_thread_waiting = -1;
+#endif /* MALI_STATE_TRACKING */
+
+/* kernel should be configured with power management support */
+#ifdef CONFIG_PM
+
+/* License should be GPL */
+#if MALI_LICENSE_IS_GPL
+
+/* Linux kernel major version */
+#define LINUX_KERNEL_MAJOR_VERSION 2
+
+/* Linux kernel minor version */
+#define LINUX_KERNEL_MINOR_VERSION 6
+
+/* Linux kernel development version */
+#define LINUX_KERNEL_DEVELOPMENT_VERSION 29
+
+#ifdef CONFIG_PM_DEBUG
+static const char* const mali_states[_MALI_MAX_DEBUG_OPERATIONS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ [_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB] = "early_suspend_level_disable_framebuffer",
+ [_MALI_DEVICE_LATERESUME] = "late_resume",
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ [_MALI_DVFS_PAUSE_EVENT] = "dvfs_pause",
+ [_MALI_DVFS_RESUME_EVENT] = "dvfs_resume",
+};
+
+#endif /* CONFIG_PM_DEBUG */
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+extern void set_mali_parent_power_domain(struct platform_device* dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_HAS_EARLYSUSPEND
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy);
+
+static struct notifier_block mali_pwr_notif_block = {
+ .notifier_call = mali_pwr_suspend_notifier
+};
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Power management thread pointer */
+struct task_struct *pm_thread;
+
+/* dvfs power management thread */
+struct task_struct *dvfs_pm_thread;
+
+/* is wake up needed */
+short is_wake_up_needed = 0;
+int timeout_fired = 2;
+unsigned int is_mali_pmm_testsuite_enabled = 0;
+
+_mali_device_power_states mali_device_state = _MALI_DEVICE_RESUME;
+_mali_device_power_states mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+_mali_osk_lock_t *lock;
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+const char* const mali_pmm_recording_events[_MALI_DEVICE_MAX_PMM_EVENTS] = {
+ [_MALI_DEVICE_PMM_TIMEOUT_EVENT] = "timeout",
+ [_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS] = "job_scheduling",
+ [_MALI_DEVICE_PMM_REGISTERED_CORES] = "cores",
+
+};
+
+unsigned int mali_timeout_event_recording_on = 0;
+unsigned int mali_job_scheduling_events_recording_on = 0;
+unsigned int is_mali_pmu_present = 0;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/* Function prototypes */
+static int mali_pm_probe(struct platform_device *pdev);
+static int mali_pm_remove(struct platform_device *pdev);
+
+/* Mali device suspend function */
+static int mali_pm_suspend(struct device *dev);
+
+/* Mali device resume function */
+static int mali_pm_resume(struct device *dev);
+
+/* Run time suspend and resume functions */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+static int mali_device_runtime_suspend(struct device *dev);
+static int mali_device_runtime_resume(struct device *dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Early suspend functions */
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mali_pm_early_suspend(struct early_suspend *mali_dev);
+static void mali_pm_late_resume(struct early_suspend *mali_dev);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* OS suspend and resume callbacks */
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state);
+#else
+static int mali_pm_os_suspend(struct device *dev);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev);
+#else
+static int mali_pm_os_resume(struct device *dev);
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+/* OS Hibernation suspend callback */
+static int mali_pm_os_suspend_on_hibernation(struct device *dev);
+
+/* OS Hibernation resume callback */
+static int mali_pm_os_resume_on_hibernation(struct device *dev);
+
+static void _mali_release_pm(struct device* device);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static const struct dev_pm_ops mali_dev_pm_ops = {
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .runtime_suspend = mali_device_runtime_suspend,
+ .runtime_resume = mali_device_runtime_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+};
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+struct pm_ext_ops mali_pm_operations = {
+ .base = {
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+ },
+};
+#endif
+
+static struct platform_driver mali_plat_driver = {
+ .probe = mali_pm_probe,
+ .remove = mali_pm_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .pm = &mali_pm_operations,
+#endif
+
+ .driver = {
+ .name = "mali_dev",
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+ .pm = &mali_dev_pm_ops,
+#endif
+ },
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* Early suspend hooks */
+static struct early_suspend mali_dev_early_suspend = {
+ .suspend = mali_pm_early_suspend,
+ .resume = mali_pm_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+};
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* Mali GPU platform device */
+struct platform_device mali_gpu_device = {
+ .name = "mali_dev",
+ .id = 0,
+ .dev.release = _mali_release_pm
+};
+
+/** This function is called when the platform device is unregistered; it is
+ * required as the platform device's release callback.
+ */
+static void _mali_release_pm(struct device *device)
+{
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Platform device removed\n" ));
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+void mali_is_pmu_present(void)
+{
+ int temp = 0;
+ temp = pmu_get_power_up_down_info();
+ if (4095 == temp)
+ {
+ is_mali_pmu_present = 0;
+ }
+ else
+ {
+ is_mali_pmu_present = 1;
+ }
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* MALI_LICENSE_IS_GPL */
+
+#if MALI_LICENSE_IS_GPL
+
+static int mali_wait_for_power_management_policy_event(void)
+{
+ int err = 0;
+ for (; ;)
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ {
+ err = -EINTR;
+ break;
+ }
+ if (is_wake_up_needed == 1)
+ {
+ break;
+ }
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ is_wake_up_needed =0;
+ return err;
+}
+
+/** This function is invoked when the Mali device is suspended
+ */
+int mali_device_suspend(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+ int err = 0;
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ event_id,
+ timeout_fired};
+ *pwr_mgmt_thread = current;
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being suspended\n" ));
+ _mali_ukk_pmm_event_message(&event);
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 1;
+#endif /* MALI_STATE_TRACKING */
+ err = mali_wait_for_power_management_policy_event();
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 0;
+#endif /* MALI_STATE_TRACKING */
+ return err;
+}
+
+/** This function is called when the operating system wants to power down
+ * the Mali GPU device.
+ */
+static int mali_pm_suspend(struct device *dev)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+#if MALI_GPU_UTILIZATION
+ mali_utilization_suspend();
+#endif /* MALI_GPU_UTILIZATION */
+ if ((mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || mali_device_state == (_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+ mali_device_state = _MALI_DEVICE_SUSPEND_IN_PROGRESS;
+ err = mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+ mali_device_state = _MALI_DEVICE_SUSPEND;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state)
+#else
+static int mali_pm_os_suspend(struct device *dev)
+#endif
+{
+ int err = 0;
+ err = mali_pm_suspend(NULL);
+ return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_HAS_EARLYSUSPEND
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy)
+{
+ int err = 0;
+ switch (event)
+ {
+ case PM_SUSPEND_PREPARE:
+ err = mali_pm_suspend(NULL);
+ break;
+
+ case PM_POST_SUSPEND:
+ err = mali_pm_resume(NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/** This function is called when the Mali GPU device is to be resumed.
+ */
+int mali_device_resume(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+ int err = 0;
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ event_id,
+ timeout_fired};
+ *pwr_mgmt_thread = current;
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being resumed\n" ));
+ _mali_ukk_pmm_event_message(&event);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power up event is scheduled\n" ));
+
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ err = mali_wait_for_power_management_policy_event();
+
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ return err;
+}
+
+/** This function resumes the Mali GPU device and updates the power state.
+ */
+
+static int mali_pm_resume(struct device *dev)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (mali_device_state == _MALI_DEVICE_RESUME)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+ err = mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+ mali_device_state = _MALI_DEVICE_RESUME;
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev)
+#else
+static int mali_pm_os_resume(struct device *dev)
+#endif
+{
+ int err = 0;
+ err = mali_pm_resume(NULL);
+ return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+static int mali_pm_os_suspend_on_hibernation(struct device *dev)
+{
+ int err = 0;
+ err = mali_pm_suspend(NULL);
+ return err;
+}
+
+static int mali_pm_os_resume_on_hibernation(struct device *dev)
+{
+ int err = 0;
+ err = mali_pm_resume(NULL);
+ return err;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/** This function is called when runtime suspend of the Mali device is required.
+ */
+static int mali_device_runtime_suspend(struct device *dev)
+{
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time suspended \n" ));
+ return 0;
+}
+
+/** This function is called when runtime resume of the Mali device is required.
+ */
+static int mali_device_runtime_resume(struct device *dev)
+{
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time Resumed \n" ));
+ return 0;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+/* This function is called from the Android framework.
+ */
+static void mali_pm_early_suspend(struct early_suspend *mali_dev)
+{
+ switch(mali_dev->level)
+ {
+ /* Screen should be turned off but framebuffer will be accessible */
+ case EARLY_SUSPEND_LEVEL_BLANK_SCREEN:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Screen is off\n" ));
+ break;
+
+ case EARLY_SUSPEND_LEVEL_STOP_DRAWING:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Suspend level stop drawing\n" ));
+ break;
+
+ /* Turn off the framebuffer. In our case No Mali GPU operation */
+ case EARLY_SUSPEND_LEVEL_DISABLE_FB:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Suspend level Disable framebuffer\n" ));
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+#if MALI_GPU_UTILIZATION
+ mali_utilization_suspend();
+#endif /* MALI_GPU_UTILIZATION */
+ if ((mali_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return;
+ }
+ mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+ mali_device_state = _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Suspend Mode\n" ));
+ break;
+ }
+}
+
+/* This function is invoked from the Android framework when the Mali device
+ * needs to be resumed.
+ */
+static void mali_pm_late_resume(struct early_suspend *mali_dev)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (mali_device_state == _MALI_DEVICE_RESUME)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return;
+ }
+ if (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB)
+ {
+ mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ mali_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_DEBUG
+
+/** This function is used for debugging purposes when the user wants to see
+ * which power management operations are supported for the Mali device.
+ */
+static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char *str = buf;
+#if !MALI_POWER_MGMT_TEST_SUITE
+ int pm_counter = 0;
+ for (pm_counter = 0; pm_counter<_MALI_MAX_DEBUG_OPERATIONS; pm_counter++)
+ {
+ str += sprintf(str, "%s ", mali_states[pm_counter]);
+ }
+#else
+ str += sprintf(str, "%d ",pwr_mgmt_status_reg);
+#endif
+ if (str != buf)
+ {
+ *(str-1) = '\n';
+ }
+ return (str-buf);
+}
+
+/** This function is called when the user wants to suspend the Mali GPU device
+ * in order to simulate power up and power down events.
+ */
+static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ int err = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend mali_dev;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+ int test_flag_dvfs = 0;
+ pwr_mgmt_status_reg = 0;
+ mali_is_pmu_present();
+
+#endif
+ if (!strncmp(buf,mali_states[_MALI_DEVICE_SUSPEND],strlen(mali_states[_MALI_DEVICE_SUSPEND])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI suspend Power operation is scheduled\n" ));
+ err = mali_pm_suspend(NULL);
+ }
+
+#if MALI_POWER_MGMT_TEST_SUITE
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Device get number of registered cores\n" ));
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ return count;
+ }
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI timeout event recording is enabled\n" ));
+ mali_timeout_event_recording_on = 1;
+ }
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Job scheduling events recording is enabled\n" ));
+ mali_job_scheduling_events_recording_on = 1;
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_RESUME],strlen(mali_states[_MALI_DEVICE_RESUME])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+ err = mali_pm_resume(NULL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB],strlen(mali_states[_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB])))
+ {
+ mali_dev.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Android early suspend operation is scheduled\n" ));
+ mali_pm_early_suspend(&mali_dev);
+ }
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_LATERESUME],strlen(mali_states[_MALI_DEVICE_LATERESUME])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+ mali_pm_late_resume(NULL);
+ }
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ else if (!strncmp(buf,mali_states[_MALI_DVFS_PAUSE_EVENT],strlen(mali_states[_MALI_DVFS_PAUSE_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Pause Power operation is scheduled\n" ));
+ err = mali_dev_pause();
+#if MALI_POWER_MGMT_TEST_SUITE
+ test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ }
+ else if (!strncmp(buf,mali_states[_MALI_DVFS_RESUME_EVENT],strlen(mali_states[_MALI_DVFS_RESUME_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Resume Power operation is scheduled\n" ));
+ err = mali_dev_resume();
+#if MALI_POWER_MGMT_TEST_SUITE
+ test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Power Mode Operation selected\n" ));
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (test_flag_dvfs == 1)
+ {
+ if (err)
+ {
+ pwr_mgmt_status_reg = 2;
+ }
+ else
+ {
+ pwr_mgmt_status_reg = 1;
+ }
+ }
+ else
+ {
+ if (1 == is_mali_pmu_present)
+ {
+ pwr_mgmt_status_reg = pmu_get_power_up_down_info();
+ }
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ return count;
+}
+
+/* Device attribute file */
+static DEVICE_ATTR(file, 0644, show_file, store_file);
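+
+/* Illustrative note (not part of this patch): with CONFIG_PM_DEBUG enabled,
+ * the attribute created in mali_pm_probe() can be exercised from user space.
+ * The sysfs path is an assumption based on the platform device name
+ * "mali_dev" with id 0.
+ *
+ *   cat /sys/devices/platform/mali_dev.0/file              # list supported ops
+ *   echo suspend > /sys/devices/platform/mali_dev.0/file   # simulate suspend
+ *   echo resume > /sys/devices/platform/mali_dev.0/file    # simulate resume
+ */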
+#endif /* CONFIG_PM_DEBUG */
+
+static int mali_pm_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+ device_remove_file(&mali_gpu_device.dev, &dev_attr_file);
+#endif /* CONFIG_PM_DEBUG */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ pm_runtime_disable(&pdev->dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ return 0;
+}
+
+/** This function is called when the device is probed */
+static int mali_pm_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+ int err;
+ err = device_create_file(&mali_gpu_device.dev, &dev_attr_file);
+ if (err)
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Error in creating device file\n" ));
+ }
+#endif /* CONFIG_PM_DEBUG */
+ return 0;
+}
+
+/** This function is called when the Mali GPU device is initialized
+ */
+int _mali_dev_platform_register(void)
+{
+ int err;
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ set_mali_parent_power_domain(&mali_gpu_device);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ err = register_pm_notifier(&mali_pwr_notif_block);
+ if (err)
+ {
+ return err;
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+ err = platform_device_register(&mali_gpu_device);
+ lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)( _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 0);
+ if (!err)
+ {
+ err = platform_driver_register(&mali_plat_driver);
+ if (!err)
+ {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ register_early_suspend(&mali_dev_early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ }
+ else
+ {
+ _mali_osk_lock_term(lock);
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+ platform_device_unregister(&mali_gpu_device);
+ }
+ }
+ return err;
+}
+
+/** This function is called when the Mali GPU driver is unloaded
+ */
+void _mali_dev_platform_unregister(void)
+{
+ _mali_osk_lock_term(lock);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&mali_dev_early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+
+ platform_driver_unregister(&mali_plat_driver);
+ platform_device_unregister(&mali_gpu_device);
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+
+#if MALI_STATE_TRACKING
+void mali_pmm_dump_os_thread_state( void )
+{
+ MALI_PRINTF(("\nOSPMM::The OS PMM thread state is...%d",is_os_pmm_thread_waiting));
+}
+#endif /* MALI_STATE_TRACKING */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h
new file mode 100644
index 00000000000..6e879fea447
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PM_H__
+#define __MALI_KERNEL_PM_H__
+
+#ifdef USING_MALI_PMM
+int _mali_dev_platform_register(void);
+void _mali_dev_platform_unregister(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_KERNEL_PM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c
new file mode 100644
index 00000000000..3d60d18e005
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c
@@ -0,0 +1,72 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_linux_dvfs_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_dvfs_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
+int mali_dev_dvfs_pause(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND))
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_dvfs_pause);
+
+int mali_dev_dvfs_resume(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND))
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_dvfs_resume);
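+
+/* A minimal usage sketch of the pause/resume pair as seen from a DVFS
+ * governor; the step between the two calls is hypothetical and only
+ * illustrates the intended pairing:
+ *
+ *     if (0 == mali_dev_dvfs_pause())
+ *     {
+ *             ... change the GPU operating point ...
+ *             mali_dev_dvfs_resume();
+ *     }
+ */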
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h
new file mode 100644
index 00000000000..614d5e1deee
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h
@@ -0,0 +1,57 @@
+
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_LINUX_PM_H__
+#define __MALI_LINUX_PM_H__
+
+#if USING_MALI_PMM
+
+#ifdef CONFIG_PM
+/* Number of power states supported for making power up and down */
+typedef enum
+{
+ _MALI_DEVICE_SUSPEND, /* Suspend */
+ _MALI_DEVICE_RESUME, /* Resume */
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB, /* Early suspend */
+ _MALI_DEVICE_LATERESUME, /* Late resume */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ _MALI_DEVICE_SUSPEND_IN_PROGRESS, /* Suspend in progress */
+ _MALI_DEVICE_MAX_POWER_STATES, /* Maximum power states */
+} _mali_device_power_states;
+
+/* Number of DVFS events */
+typedef enum
+{
+ _MALI_DVFS_PAUSE_EVENT = _MALI_DEVICE_MAX_POWER_STATES-1, /* DVFS Pause event */
+ _MALI_DVFS_RESUME_EVENT, /* DVFS Resume event */
+ _MALI_MAX_DEBUG_OPERATIONS,
+} _mali_device_dvfs_events;
+
+extern _mali_device_power_states mali_device_state;
+extern _mali_device_power_states mali_dvfs_device_state;
+extern _mali_osk_lock_t *lock;
+extern short is_wake_up_needed;
+extern int timeout_fired;
+extern struct platform_device mali_gpu_device;
+
+/* dvfs pm thread */
+extern struct task_struct *dvfs_pm_thread;
+
+/* Power management thread */
+extern struct task_struct *pm_thread;
+
+int mali_device_suspend(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_device_resume(u32 event_id, struct task_struct **pwr_mgmt_thread);
+
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h
new file mode 100644
index 00000000000..c80b0b0a5e3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_LINUX_PM_TESTSUITE_H__
+#define __MALI_LINUX_PM_TESTSUITE_H__
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+
+typedef enum
+{
+ _MALI_DEVICE_PMM_TIMEOUT_EVENT,
+ _MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS,
+ _MALI_DEVICE_PMM_REGISTERED_CORES,
+ _MALI_DEVICE_MAX_PMM_EVENTS
+
+} _mali_device_pmm_recording_events;
+
+extern unsigned int mali_timeout_event_recording_on;
+extern unsigned int mali_job_scheduling_events_recording_on;
+extern unsigned int pwr_mgmt_status_reg;
+extern unsigned int is_mali_pmm_testsuite_enabled;
+extern unsigned int is_mali_pmu_present;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_TESTSUITE_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c
new file mode 100644
index 00000000000..cc029c4a8f8
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+ MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ atomic_set((atomic_t *)&atom->u.val, val);
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+ MALI_IGNORE(atom);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c
new file mode 100644
index 00000000000..b60bf825897
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * @file mali_osk_indir_mmap.c
+ * Implementation of per-OS Kernel level specifics
+ */
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args )
+{
+ /* args->ctx ignored here; args->ukk_private required instead */
+ /* we need to lock the mmap semaphore before calling the do_mmap function */
+ down_write(&current->mm->mmap_sem);
+
+ args->mapping = (void __user *)do_mmap(
+ (struct file *)args->ukk_private,
+ 0, /* start mapping from any address after NULL */
+ args->size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ args->phys_addr
+ );
+
+ /* and unlock it after the call */
+ up_write(&current->mm->mmap_sem);
+
+ /* No cookie required here */
+ args->cookie = 0;
+ /* uku_private meaningless, so zero */
+ args->uku_private = NULL;
+
+ if ( (NULL == args->mapping) || IS_ERR((void *)args->mapping) )
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Success */
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args )
+{
+ /* args->ctx and args->cookie ignored here */
+
+ if ((NULL != current) && (NULL != current->mm))
+ {
+ /* remove mapping of mali memory from the process' view */
+ /* lock mmap semaphore before call */
+ /* lock mmap_sem before calling do_munmap */
+ down_write(&current->mm->mmap_sem);
+ do_munmap(
+ current->mm,
+ (unsigned long)args->mapping,
+ args->size
+ );
+ /* and unlock after call */
+ up_write(&current->mm->mmap_sem);
+ MALI_DEBUG_PRINT(5, ("unmapped\n"));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Freeing of a big block while no user process attached, assuming crash cleanup in progress\n"));
+ }
+
+ return _MALI_OSK_ERR_OK; /* always succeeds */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h
new file mode 100644
index 00000000000..f87739bb8f9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_indir_mmap.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_INDIR_MMAP_H__
+#define __MALI_OSK_INDIR_MMAP_H__
+
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Linux specific means for calling _mali_ukk_mem_mmap/munmap
+ *
+ * The presence of _MALI_OSK_SPECIFIC_INDIRECT_MMAP indicates that
+ * _mali_osk_specific_indirect_mmap and _mali_osk_specific_indirect_munmap
+ * should be used instead of _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * The arguments are the same as _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * In ALL operating systems other than Linux, it is expected that common code
+ * should be able to call _mali_ukk_mem_mmap/_mali_ukk_mem_munmap directly.
+ * Such systems should NOT define _MALI_OSK_SPECIFIC_INDIRECT_MMAP.
+ */
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args );
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args );
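+
+/* Illustrative call sequence only; the caller-side values (mapping_size,
+ * phys_base, filp) are hypothetical and stand in for whatever the UKK layer
+ * passes down:
+ *
+ *     _mali_uk_mem_mmap_s args = { 0, };
+ *     args.size        = mapping_size;
+ *     args.phys_addr   = phys_base;
+ *     args.ukk_private = filp;            (the struct file * from the mmap path)
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_specific_indirect_mmap(&args))
+ *     {
+ *             ... args.mapping now holds the user-space address ...
+ *     }
+ */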
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_INDIR_MMAP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c
new file mode 100644
index 00000000000..1a4bbcecdce
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "linux/interrupt.h"
+
+typedef struct _mali_osk_irq_t_struct
+{
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+ _mali_osk_irq_bhandler_t bhandler;
+ struct work_struct work_queue_irq_handle; /* Workqueue for the bottom half of the IRQ-handling. This job is activated when this core gets an IRQ.*/
+} mali_osk_irq_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *pmm_wq=NULL;
+#endif
+
+typedef void (*workqueue_func_t)(void *);
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work );
+#else
+static void irq_handler_bottom_half ( void * input );
+#endif
+
+/**
+ * Newer Linux kernels have deprecated SA_SHIRQ in favour of IRQF_SHARED.
+ * Define IRQF_SHARED here to handle older kernels which have not made this swap.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description )
+{
+ mali_osk_irq_object_t *irq_object;
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) return NULL;
+
+ /* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+ /* New syntax: INIT_WORK( struct work_struct *work, void (*function)(struct work_struct *)) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half);
+#else
+ /* Old syntax: INIT_WORK( struct work_struct *work, void (*function)(void *), void *data) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half, irq_object);
+#endif /* defined(INIT_DELAYED_WORK) */
+
+ if (-1 == irqnum)
+ {
+ /* Probe for IRQ */
+ if ( (NULL != trigger_func) && (NULL != ack_func) )
+ {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do
+ {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(data);
+ }
+ while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ }
+ else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum)
+ {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->bhandler = bhandler;
+ irq_object->data = data;
+
+ /* Is this a real IRQ handler we need? */
+ if (!mali_benchmark && irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+ {
+ if (-1 == irqnum)
+ {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, IRQF_SHARED, description, irq_object))
+ {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+ }
+
+#if MALI_LICENSE_IS_GPL
+ if ( _MALI_OSK_IRQ_NUMBER_PMM == irqnum )
+ {
+ pmm_wq = create_singlethread_workqueue("mali-pmm-wq");
+ }
+#endif
+
+ return irq_object;
+}
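+
+/* Illustrative registration from a core driver; the handlers, the core object
+ * and the description string are hypothetical, only the OSK call itself is real:
+ *
+ *     core->irq = _mali_osk_irq_init( core->irq_number,
+ *                                     my_core_uhandler, my_core_bhandler,
+ *                                     NULL, NULL, core, "mali_core_irq" );
+ *     if (NULL == core->irq)
+ *     {
+ *             ... handle the failure ...
+ *     }
+ */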
+
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+#if MALI_LICENSE_IS_GPL
+ if ( irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ queue_work(pmm_wq,&irq_object->work_queue_irq_handle);
+ }
+ else
+ {
+#endif
+ schedule_work(&irq_object->work_queue_irq_handle);
+#if MALI_LICENSE_IS_GPL
+ }
+#endif
+}
+
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+
+#if MALI_LICENSE_IS_GPL
+ if(irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ flush_workqueue(pmm_wq);
+ destroy_workqueue(pmm_wq);
+ }
+#endif
+ if (!mali_benchmark)
+ {
+ free_irq(irq_object->irqnum, irq_object);
+ }
+ kfree(irq_object);
+ flush_scheduled_work();
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU gets the hw-irq from Mali, or from other devices on the same IRQ channel.
+ * One of these functions is registered for each Mali core. When an interrupt
+ * arrives, this function is called as many times as there are registered Mali cores.
+ * That means that we only check one Mali core per call, and the core to check on
+ * each turn is given by the \a dev_id variable.
+ * If we detect a pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero.
+ * Then we schedule the mali_core_irq_handler_bottom_half to run as a high priority
+ * work queue job.
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (irq_object->uhandler(irq_object->data) == _MALI_OSK_ERR_OK)
+ {
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* Is executed when an interrupt occurs on one core */
+/* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work )
+#else
+static void irq_handler_bottom_half ( void * input )
+#endif
+{
+ mali_osk_irq_object_t *irq_object;
+
+#if defined(INIT_DELAYED_WORK)
+ irq_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_irq_object_t, work_queue_irq_handle);
+#else
+ if ( NULL == input )
+ {
+ MALI_PRINT_ERROR(("IRQ: Null pointer! Illegal!"));
+ return; /* Error */
+ }
+ irq_object = (mali_osk_irq_object_t *) input;
+#endif
+
+ irq_object->bhandler(irq_object->data);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c
new file mode 100644
index 00000000000..11285efc682
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the locks we implement: */
+typedef enum
+{
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN, /* Mutex, implicitly non-interruptable, use spin_lock/spin_unlock */
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ, /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX, /* Interruptable, use up()/down_interruptible() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT, /* Non-Interruptable, use up()/down() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptable, Reader/Writer, use {up,down}{read,write}() */
+
+ /* Linux supports, but we do not support:
+ * Non-Interruptable Reader/Writer spinlock mutexes - RW optimization will be switched off
+ */
+
+ /* Linux does not support:
+ * One-locks, of any sort - no optimization for this fact will be made.
+ */
+
+} _mali_osk_internal_locktype;
+
+struct _mali_osk_lock_t_struct
+{
+ _mali_osk_internal_locktype type;
+ unsigned long flags;
+ union
+ {
+ spinlock_t spinlock;
+ struct semaphore sema;
+ struct rw_semaphore rw_sema;
+ } obj;
+ MALI_DEBUG_CODE(
+ /** original flags for debug checking */
+ _mali_osk_lock_flags_t orig_flags;
+ _mali_osk_lock_mode_t locked_as;
+ ); /* MALI_DEBUG_CODE */
+};
+
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+ _mali_osk_lock_t *lock = NULL;
+
+ /* Validate parameters: */
+ /* Flags acceptable */
+ MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+ | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+ | _MALI_OSK_LOCKFLAG_READERWRITER
+ | _MALI_OSK_LOCKFLAG_ORDERED
+ | _MALI_OSK_LOCKFLAG_ONELOCK )) );
+ /* Spinlocks are always non-interruptable */
+ MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+ || !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+ /* Parameter initial SBZ - for future expansion */
+ MALI_DEBUG_ASSERT( 0 == initial );
+
+ lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+ if ( NULL == lock )
+ {
+ return lock;
+ }
+
+ /* Determine type of mutex: */
+ /* defaults to interruptable mutex if no flags are specified */
+
+ if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+ {
+ /* Non-interruptable Spinlocks override all others */
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+ lock->flags = 0;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+ && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+ init_rwsem( &lock->obj.rw_sema );
+ }
+ else
+ {
+ /* Usual mutex types */
+ if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+ }
+ else
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+ }
+
+ /* Initially unlocked */
+ sema_init( &lock->obj.sema, 1 );
+ }
+
+ MALI_DEBUG_CODE(
+ /* Debug tracking of flags */
+ lock->orig_flags = flags;
+ lock->locked_as = _MALI_OSK_LOCKMODE_UNDEF;
+ ); /* MALI_DEBUG_CODE */
+
+ return lock;
+}
+
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock
+ * Since information is lost on the internal locktype, we use the original
+ * information, which is only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_lock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_lock_irqsave(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ if ( down_interruptible(&lock->obj.sema) )
+ {
+ err = _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ down(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ down_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ down_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+ /* Reaching here indicates a programming error, so you will not get here
+ * on non-DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+
+ /* DEBUG tracking of previously locked state - occurs after lock obtained */
+ MALI_DEBUG_CODE(
+ if ( _MALI_OSK_ERR_OK == err )
+ {
+ /* Assert that this is not currently locked */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_UNDEF == lock->locked_as );
+
+ lock->locked_as = mode;
+ }
+ ); /* MALI_DEBUG_CODE */
+
+ return err;
+}
+
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock
+ * Since information is lost on the internal locktype, we use the original
+ * information, which is only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ /* For DEBUG only, assert that we previously locked this, and in the same way (RW/RO) */
+ MALI_DEBUG_ASSERT( mode == lock->locked_as );
+
+ /* DEBUG tracking of previously locked state - occurs before lock released */
+ MALI_DEBUG_CODE( lock->locked_as = _MALI_OSK_LOCKMODE_UNDEF );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_unlock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ /* FALLTHROUGH */
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ up(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ up_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ up_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+ /* Reaching here indicates a programming error, so you will not get here
+ * on non-DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+}
+
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ /* For DEBUG only, assert that this is not currently locked */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_UNDEF == lock->locked_as );
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+}
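+
+/* Illustrative use of the reader/writer variant; the protected state and the
+ * caller are hypothetical, only the OSK calls themselves are real:
+ *
+ *     _mali_osk_lock_t *rwlock = _mali_osk_lock_init(
+ *             _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0, 0 );
+ *     if (NULL != rwlock)
+ *     {
+ *             _mali_osk_lock_wait( rwlock, _MALI_OSK_LOCKMODE_RO );
+ *             ... read the shared state ...
+ *             _mali_osk_lock_signal( rwlock, _MALI_OSK_LOCKMODE_RO );
+ *             _mali_osk_lock_term( rwlock );
+ *     }
+ */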
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
new file mode 100644
index 00000000000..6454709caaf
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+typedef struct mali_vma_usage_tracker
+{
+ int references;
+ u32 cookie;
+} mali_vma_usage_tracker;
+
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping
+ */
+struct AllocationList
+{
+ struct AllocationList *next;
+ u32 offset;
+ u32 physaddr;
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init
+ */
+struct MappingInfo
+{
+ struct vm_area_struct *vma;
+ struct AllocationList *list;
+};
+
+typedef struct MappingInfo MappingInfo;
+
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+spinlock_t allocation_list_spinlock;
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+ static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+ static int pre_allocated_memory_size_max = 6 * 1024 * 1024; /* 6 MiB */
+#endif
+
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+ .open = mali_kernel_memory_vma_open,
+ .close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+ .nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+
+void mali_osk_low_level_mem_init(void)
+{
+ spin_lock_init( &allocation_list_spinlock );
+ pre_allocated_memory = (AllocationList*) NULL ;
+}
+
+void mali_osk_low_level_mem_term(void)
+{
+ while ( NULL != pre_allocated_memory )
+ {
+ AllocationList *item;
+ item = pre_allocated_memory;
+ pre_allocated_memory = item->next;
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+ }
+ pre_allocated_memory_size_current = 0;
+}
+
+static u32 _kernel_page_allocate(void)
+{
+ struct page *new_page;
+ u32 linux_phys_addr;
+
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+ if ( NULL == new_page )
+ {
+ return 0;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ return linux_phys_addr;
+}
+
+static void _kernel_page_release(u32 physical_address)
+{
+ struct page *unmap_page;
+
+ #if 1
+ dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ #endif
+
+ unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
+ MALI_DEBUG_ASSERT_POINTER( unmap_page );
+ __free_page( unmap_page );
+}
+
+static AllocationList * _allocation_list_item_get(void)
+{
+ AllocationList *item = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory )
+ {
+ item = pre_allocated_memory;
+ pre_allocated_memory = pre_allocated_memory->next;
+ pre_allocated_memory_size_current -= PAGE_SIZE;
+
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return item;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ item = _mali_osk_malloc( sizeof(AllocationList) );
+ if ( NULL == item)
+ {
+ return NULL;
+ }
+
+ item->physaddr = _kernel_page_allocate();
+ if ( 0 == item->physaddr )
+ {
+ /* Non-fatal error condition, out of memory. Upper levels will handle this. */
+ _mali_osk_free( item );
+ return NULL;
+ }
+ return item;
+}
+
+static void _allocation_list_item_release(AllocationList * item)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+ {
+ item->next = pre_allocated_memory;
+ pre_allocated_memory = item;
+ pre_allocated_memory_size_current += PAGE_SIZE;
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ /*
+ * We always fail the call since all memory is pre-faulted when assigned to the process.
+ * Only the Mali cores can use page faults to extend buffers.
+ */
+
+ MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+ MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+ vma_usage_tracker->references++;
+
+ return;
+}
+
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+ _mali_uk_mem_munmap_s args = {0, };
+ mali_memory_allocation * descriptor;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+ BUG_ON(!vma_usage_tracker);
+ BUG_ON(0 == vma_usage_tracker->references);
+
+ vma_usage_tracker->references--;
+
+ if (0 != vma_usage_tracker->references)
+ {
+ MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
+ return;
+ }
+
+ /** @note args->context unused, initialized to 0.
+ * Instead, we use the memory_session from the cookie */
+
+ descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+ args.cookie = (u32)descriptor;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ _mali_ukk_mem_munmap( &args );
+
+ /* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
+ * In the case of the memory engine, it is called as the release function that has been registered with the engine*/
+}
+
+
+void _mali_osk_mem_barrier( void )
+{
+ mb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ iounmap((void*)virt);
+}
+
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+ void * virt;
+ MALI_DEBUG_ASSERT_POINTER( phys );
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+ MALI_DEBUG_ASSERT( 0 != size );
+
+ /* dma_alloc_* uses a limited region of address space. On most architectures
+ * and machines, 2 to 14 MiB is available. This should be enough for the page
+ * tables, which are currently the only user of this function. */
+ virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
+
+ MALI_DEBUG_PRINT(3, ("Page table virt: 0x%x = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, phys));
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(1, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+ MALI_DEBUG_PRINT(1, ("Solution: When configuring and building linux kernel, set CONSISTENT_DMA_SIZE to be 14 MB.\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return (mali_io_address)virt;
+}
+
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+ MALI_DEBUG_ASSERT( 0 != size );
+ MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+ dma_free_coherent(NULL, size, virt, phys);
+}
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+}
+
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+ release_mem_region(phys, size);
+}
+
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+ return ioread32(((u8*)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+ iowrite32(val, ((u8*)addr) + offset);
+}
+
+void _mali_osk_cache_flushall( void )
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+ wmb();
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct *vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ /* Re-write the process_addr_mapping_info */
+ mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+ if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+ if (NULL == vma_usage_tracker)
+ {
+ MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+ _mali_osk_free( mappingInfo );
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo->vma = vma;
+ descriptor->process_addr_mapping_info = mappingInfo;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+ /* list member is already NULL */
+
+ /*
+ * Set some bits which indicate that:
+ * - the memory is IO memory, meaning that no paging is to be performed and
+ *   the memory should not be included in crash dumps;
+ * - the memory is reserved, meaning that it is present and can never be
+ *   paged out (see also the previous entry).
+ */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+ vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+ vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+ vma->vm_private_data = vma_usage_tracker;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = mappingInfo->vma;
+
+ MALI_DEBUG_ASSERT_POINTER( vma );
+
+ /* ASSERT that there are no allocations on the list. Unmap should've been
+ * called on all OS allocations. */
+ MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ _mali_osk_free(vma_usage_tracker);
+
+ _mali_osk_free( mappingInfo );
+ return;
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+ struct vm_area_struct *vma;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+ if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
+ *phys_addr, size, descriptor->mapping, offset));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ vma = mappingInfo->vma;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+ {
+ _mali_osk_errcode_t ret;
+ AllocationList *alloc_item;
+ u32 linux_phys_frame_num;
+
+ alloc_item = _allocation_list_item_get();
+ if ( NULL == alloc_item )
+ {
+ /* Out of memory; upper levels handle this as a non-fatal condition */
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+ ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ if ( ret != _MALI_OSK_ERR_OK)
+ {
+ _allocation_list_item_release(alloc_item);
+ return ret;
+ }
+
+ /* Put our alloc_item into the list of allocations on success */
+ alloc_item->next = mappingInfo->list;
+ alloc_item->offset = offset;
+
+ /*alloc_item->physaddr = linux_phys_addr;*/
+ mappingInfo->list = alloc_item;
+
+ /* Write out new physical address on success */
+ *phys_addr = alloc_item->physaddr;
+
+ return ret;
+ }
+
+ /* Otherwise, Use the supplied physical address */
+
+ /* ASSERT that supplied phys_addr is page aligned */
+ MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
+
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ if (NULL == descriptor->mapping) return;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+ size, descriptor->mapping, offset));
+ return;
+ }
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+ {
+ /* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+ * so needs to be unmapped
+ */
+ while (size)
+ {
+ /* First find the allocation in the list of allocations */
+ AllocationList *alloc = mappingInfo->list;
+ AllocationList **prev = &(mappingInfo->list);
+ while (NULL != alloc && alloc->offset != offset)
+ {
+ prev = &(alloc->next);
+ alloc = alloc->next;
+ }
+ if (alloc == NULL) {
+ MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ continue;
+ }
+
+ _kernel_page_release(alloc->physaddr);
+
+ /* Remove the allocation from the list */
+ *prev = alloc->next;
+ _mali_osk_free( alloc );
+
+ /* Move onto the next allocation */
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+ }
+
+ /* Linux does the right thing as part of munmap to remove the mapping */
+
+ return;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c
new file mode 100644
index 00000000000..694d8eeaabc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Added support for overriding memory settings in arch_configuration using
+ * mali_mem module parameter.
+ *
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "mali_kernel_linux.h" /* exports initialize/terminate_kernel_device() definition of mali_osk_low_level_mem_init() and term */
+#include "arch/config.h" /* contains the configuration of the arch we are compiling for */
+
+extern char* mali_mem;
+
+/* is called from mali_kernel_constructor in common code */
+_mali_osk_errcode_t _mali_osk_init( void )
+{
+ if (0 != initialize_kernel_device()) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ mali_osk_low_level_mem_init();
+
+ MALI_SUCCESS;
+}
+
+/* is called from mali_kernel_deconstructor in common code */
+void _mali_osk_term( void )
+{
+ mali_osk_low_level_mem_term();
+ terminate_kernel_device();
+}
+
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources )
+{
+ *num_resources = sizeof(arch_configuration) / sizeof(arch_configuration[0]);
+ *arch_config = arch_configuration;
+
+ /* override the MEMORY resource if a value has been supplied from the command line */
+ if ('\0' != mali_mem[0]) {
+ char *p = mali_mem;
+ _mali_osk_resource_type_t mem_type = MEMORY;
+ unsigned long mem_base = 0;
+ unsigned long mem_size = memparse(p, &p);
+ if (*p == '@') {
+ if ((*(p + 1) == 'O') || (*(p + 1) == 'o')) { /* as in OS. e.g. mali_mem=64M@OS_MEMORY */
+ mem_type = OS_MEMORY;
+ mem_base = 0;
+ } else { /* parse the base address */
+ mem_type = MEMORY;
+ mem_base = memparse(p + 1, &p);
+ }
+ }
+
+ /* change the first memory entry in the architecture config. */
+ if (0 < mem_size) {
+ int i;
+ for (i = 0; i < *num_resources; ++i) {
+ if ((MEMORY == arch_configuration[i].type) ||
+ (OS_MEMORY == arch_configuration[i].type)) {
+ MALI_DEBUG_PRINT( 1, ("Overriding arch resource[%d] :\n",i));
+ MALI_DEBUG_PRINT( 1, ("Type: %s, base: %x, size %x\n",
+ (OS_MEMORY==mem_type?"OS_MEMORY":"MEMORY"),mem_base,mem_size));
+ arch_configuration[i].type = mem_type;
+ arch_configuration[i].base = mem_base;
+ arch_configuration[i].size = mem_size;
+ break;
+ }
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
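+
+/* Forms of the mali_mem module parameter accepted by the parser above; the
+ * sizes and base address are example values only:
+ *
+ *     mali_mem=64M@0x40000000    dedicated MEMORY block of 64 MiB at physical 0x40000000
+ *     mali_mem=64M@OS_MEMORY     64 MiB taken from OS memory instead of a dedicated block
+ *     mali_mem=64M               size-only override, keeps the MEMORY type with base 0
+ */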
+
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources )
+{
+ /* Nothing to do */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c
new file mode 100644
index 00000000000..2f7fc15847b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 inline _mali_osk_clz( u32 input )
+{
+ return 32-fls(input);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c
new file mode 100644
index 00000000000..bf6679f7bac
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+
+void inline *_mali_osk_calloc( u32 n, u32 size )
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc( u32 size )
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free( void *ptr )
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+ return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c
new file mode 100644
index 00000000000..3afd2da736c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+u32 _mali_osk_get_tid(void)
+{
+ /* pid is actually identifying the thread on Linux */
+ return (u32)current->pid;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c
new file mode 100644
index 00000000000..2efc140f60a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+/**
+ * Declaration of the notification queue object type.
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl.
+ * When a new notification is posted, a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+ struct semaphore mutex; /**< Mutex protecting the list */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+ _mali_osk_notification_queue_t * result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ sema_init(&result->mutex, 1);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size, GFP_KERNEL );
+ if (NULL == notification)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size)
+ {
+ notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ }
+ else
+ {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* Remove from the list */
+ list_del(&notification->list);
+ /* Free the container */
+ kfree(notification);
+}
+
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
+
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* lock queue access */
+ down(&queue->mutex);
+ /* add to list */
+ list_add_tail(&notification->list, &queue->head);
+ /* unlock the queue */
+ up(&queue->mutex);
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+static int _mali_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ int ret;
+
+ down(&queue->mutex);
+ ret = list_empty(&queue->head);
+ up(&queue->mutex);
+ return ret;
+}
+
+#if MALI_STATE_TRACKING
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ return _mali_notification_queue_is_empty(queue) ? MALI_TRUE : MALI_FALSE;
+}
+#endif
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+ down(&queue->mutex);
+
+ if (!list_empty(&queue->head))
+ {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ up(&queue->mutex);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( result );
+
+ /* default result */
+ *result = NULL;
+
+ while (_MALI_OSK_ERR_OK != _mali_osk_notification_queue_dequeue(queue, result))
+ {
+ if (wait_event_interruptible(queue->receive_queue, !_mali_notification_queue_is_empty(queue)))
+ {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c
new file mode 100644
index 00000000000..04fcd4572df
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c
@@ -0,0 +1,195 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/platform_device.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_pm_testsuite.h"
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifdef CONFIG_PM_RUNTIME
+static int is_runtime = 0;
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+#ifdef CONFIG_PM
+unsigned int mali_pmm_events_triggered_mask = 0;
+#endif /* CONFIG_PM */
+
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id mali_pmm_event)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+
+ switch (mali_pmm_event)
+ {
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<0);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<1);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<2);
+ mali_job_scheduling_events_recording_on = 0;
+ pwr_mgmt_status_reg = mali_pmm_events_triggered_mask;
+ }
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ if (mali_timeout_event_recording_on == 1)
+ {
+ pwr_mgmt_status_reg = (1<<3);
+ mali_timeout_event_recording_on = 0;
+ }
+ break;
+
+ default:
+
+ break;
+
+ }
+#endif /* CONFIG_PM */
+
+#endif /* MALI_LICENSE_IS_GPL */
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/** This function is called when the Mali device has completed the power-up
+ * operation.
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK Power up Done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** This function is called when the Mali device has completed the power-down
+ * operation.
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (is_mali_pmu_present == 0)
+ {
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power down Done\n" ));
+ return;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** This function is invoked when the Mali device is idle.
+*/
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle(void)
+{
+ _mali_osk_errcode_t err = 0;
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ err = pm_runtime_put_sync(&(mali_gpu_device.dev));
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_idle\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+ return err;
+}
+
+/** This function is invoked when the Mali device needs to be activated.
+*/
+void _mali_osk_pmm_dev_activate(void)
+{
+
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ int err = 0;
+ if(is_runtime == 0)
+ {
+ pm_suspend_ignore_children(&(mali_gpu_device.dev), true);
+ pm_runtime_enable(&(mali_gpu_device.dev));
+ pm_runtime_get_sync(&(mali_gpu_device.dev));
+ is_runtime = 1;
+ }
+ else
+ {
+ err = pm_runtime_get_sync(&(mali_gpu_device.dev));
+ }
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_activate\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+}
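+
+/*
+ * Note on intended usage (assumed caller pattern; the mali-runtimepm platform
+ * file added by this patch pairs the calls this way): each call to
+ * _mali_osk_pmm_dev_activate() takes a runtime PM reference on
+ * mali_gpu_device.dev and is expected to be balanced by a later
+ * _mali_osk_pmm_dev_idle(), so the usage count can drop back to zero and the
+ * device may runtime-suspend when the GPU has no work:
+ *
+ *   _mali_osk_pmm_dev_activate();   // pm_runtime_get_sync()
+ *   ...submit and complete GPU work...
+ *   _mali_osk_pmm_dev_idle();       // pm_runtime_put_sync()
+ */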
+
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(dvfs_pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK DVFS Operation done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h
new file mode 100644
index 00000000000..54acfdd138c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS kernel-level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c
new file mode 100644
index 00000000000..aa7962380e6
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+ return jiffies_to_msecs(ticks);
+}
+
+u32 _mali_osk_time_tickcount( void )
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns( void )
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c
new file mode 100644
index 00000000000..3dfba76bcb3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct
+{
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = _mali_osk_time_tickcount() + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), expiry_tick);
+}
+
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
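+
+/*
+ * Illustrative usage sketch (my_timeout_handler, my_data and MY_TIMEOUT_MS are
+ * placeholders):
+ *
+ *   _mali_osk_timer_t *t = _mali_osk_timer_init();
+ *   _mali_osk_timer_setcallback(t, my_timeout_handler, my_data);
+ *   _mali_osk_timer_add(t, _mali_osk_time_mstoticks(MY_TIMEOUT_MS));
+ *   ...
+ *   _mali_osk_timer_del(t);   // synchronously cancel a pending timer
+ *   _mali_osk_timer_term(t);  // release the timer object
+ */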
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c
new file mode 100644
index 00000000000..0f745492613
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memory allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs)
+{
+ _mali_uk_get_system_info_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_system_info_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs)
+{
+ _mali_uk_get_system_info_s kargs;
+ _mali_osk_errcode_t err;
+ _mali_system_info *system_info_user;
+ _mali_system_info *system_info_kernel;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.system_info, &uargs->system_info)) return -EFAULT;
+ if (0 != get_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ /* A temporary kernel buffer for the system_info data structure is passed through the system_info
+ * member. The ukk_private member will point to the user space destination of this buffer so
+ * that _mali_ukk_get_system_info() can adjust the pointers inside system_info to be valid
+ * for user space.
+ */
+ system_info_kernel = kmalloc(kargs.size, GFP_KERNEL);
+ if (NULL == system_info_kernel) return -EFAULT;
+
+ system_info_user = kargs.system_info;
+ kargs.system_info = system_info_kernel;
+ kargs.ukk_private = (u32)system_info_user;
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_get_system_info(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ kfree(system_info_kernel);
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(system_info_user, system_info_kernel, kargs.size))
+ {
+ kfree(system_info_kernel);
+ return -EFAULT;
+ }
+
+ kfree(system_info_kernel);
+ return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type)
+ {
+ kargs.ctx = NULL; /* prevent the kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ }
+ else
+ {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type))
+ {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c
new file mode 100644
index 00000000000..a6f355fd459
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_uk_gp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ kargs.ctx = NULL; /* prevent the kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ /*
+ * If this happens, user space will not know that the job was actually started,
+ * and if we return a queued job, user space will still believe that job is queued.
+ * This will typically lead to a deadlock in user space.
+ * However, this could only happen if user space deliberately passes a user buffer which
+ * passes the access_ok(VERIFY_WRITE) check but is not fully writable at the time of copy_to_user().
+ * The official Mali driver will never attempt to do that, and kernel space should not be affected.
+ * That is why we do not bother with a complex rollback in this extremely rare case.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs)
+{
+ _mali_uk_gp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_gp_abort_job(&kargs);
+
+ return 0;
+}
+
+
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
new file mode 100644
index 00000000000..3f67a1e3328
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs)
+{
+ _mali_uk_init_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_init_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+ if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+ return 0;
+
+mem_init_rollback:
+ {
+ _mali_uk_term_mem_s kargs;
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_init_mem after a failing put_user() failed\n"));
+ }
+ }
+ return -EFAULT;
+}
+
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs)
+{
+ _mali_uk_term_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_map_external_mem after a failing put_user() failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_map_external_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+ _mali_uk_unmap_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_unmap_external_mem produced */
+ return map_errcode(err_code);
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+ _mali_uk_release_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_release_ump_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_release_ump_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_ump_mem after a failing put_user() failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_attach_ump_mem produced */
+ return map_errcode(err_code);
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void *buffer;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ kargs.buffer = NULL;
+
+ /* get location of user buffer */
+ if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+ /* get size of mmu page table info buffer from user space */
+ if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+ /* verify we can access the whole of the user buffer */
+ if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ kargs.buffer = _mali_osk_malloc(kargs.size);
+ if (NULL == kargs.buffer)
+ {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(buffer, kargs.buffer, kargs.size) ) goto err_exit;
+ if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->register_writes)) goto err_exit;
+ if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->page_table_dump)) goto err_exit;
+ if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+ if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ rc = 0;
+
+err_exit:
+ if (kargs.buffer) _mali_osk_free(kargs.buffer);
+ return rc;
+}
+
+
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument )
+{
+ _mali_uk_get_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ return -EFAULT;
+ }
+
+ /* This interface passes the file pointer through the ukk_private word */
+ uk_args.ukk_private = (u32)filp;
+ uk_args.ctx = filp->private_data;
+ err_code = _mali_ukk_get_big_block( &uk_args );
+
+ /* Do not leak the private word back into user space */
+ uk_args.ukk_private = 0;
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ return map_errcode(err_code);
+ }
+
+ /* From this point on, we must roll-back any failing action to preserve the
+ * meaning of the U/K interface (e.g. when excluded) */
+
+ /* transfer response back to user space */
+ if ( 0 != copy_to_user(argument, &uk_args, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ /* Roll-back - the _mali_uk_get_big_block call succeeded, so all
+ * values in uk_args will be correct */
+ _mali_uk_free_big_block_s uk_args_rollback = {0, };
+
+ uk_args_rollback.ctx = uk_args.ctx;
+ uk_args_rollback.cookie = uk_args.cookie;
+ err_code = _mali_ukk_free_big_block( &uk_args_rollback );
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ /* error in DEBUG and RELEASE */
+ MALI_PRINT_ERROR( ("Failed to rollback get_big_block: %.8X\n", (u32)err_code) );
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_free_big_block_wrapper(struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument)
+{
+ _mali_uk_free_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL );
+
+ /* get call arguments from user space. get_user returns 0 on success */
+ if ( 0 != get_user(uk_args.cookie, &argument->cookie) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_free_big_block( &uk_args );
+
+ /* Return the error that _mali_ukk_free_big_block produced */
+ return map_errcode(err_code);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c
new file mode 100644
index 00000000000..f77a177d865
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_uk_pp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_pp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_pp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.returned_user_job_ptr, &uargs->returned_user_job_ptr) ||
+ 0 != put_user(kargs.status, &uargs->status))
+ {
+ /*
+ * If this happens, user space will not know that the job was actually started,
+ * and if we return a queued job, user space will still believe that job is queued.
+ * This will typically lead to a deadlock in user space.
+ * However, this could only happen if user space deliberately passes a user buffer which
+ * passes the access_ok(VERIFY_WRITE) check but is not fully writable at the time of copy_to_user().
+ * The official Mali driver will never attempt to do that, and kernel space should not be affected.
+ * That is why we do not bother with a complex rollback in this extremely rare case.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs)
+{
+ _mali_uk_pp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_pp_abort_job(&kargs);
+
+ return 0;
+}
+
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
+
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c
new file mode 100644
index 00000000000..636bd03580e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+ _mali_uk_profiling_start_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_start(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.limit, &uargs->limit))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+ _mali_uk_profiling_stop_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_stop(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.count, &uargs->count))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+ _mali_uk_profiling_get_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.index, &uargs->index))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_profiling_get_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent the kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+ _mali_uk_profiling_clear_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_clear(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c
new file mode 100644
index 00000000000..965ee411535
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h
new file mode 100644
index 00000000000..54e3f656b37
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs);
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs);
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument );
+int mem_free_big_block_wrapper( struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+int map_errcode( _mali_osk_errcode_t err );
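+
+/*
+ * The wrappers above are intended to be called from the driver's ioctl
+ * handler. A hypothetical dispatch case (MALI_IOC_GET_API_VERSION is a
+ * placeholder command macro, not necessarily the name used by this driver):
+ *
+ *   case MALI_IOC_GET_API_VERSION:
+ *       return get_api_version_wrapper(session_data,
+ *                                      (_mali_uk_get_api_version_s __user *)arg);
+ */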
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c
new file mode 100644
index 00000000000..36301a93a2c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+u32 pmu_get_power_up_down_info(void)
+{
+ return 4095;
+
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c
new file mode 100644
index 00000000000..b8c47094177
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for the runtime-PM platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+#include "mali_pmm.h"
+
+static int is_run_time = 0;
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ if(is_run_time == 1)
+ {
+ _mali_osk_pmm_dev_idle();
+ is_run_time = 0;
+ }
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ if(is_run_time == 0)
+ {
+ _mali_osk_pmm_dev_activate();
+ is_run_time = 1;
+ }
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+u32 pmu_get_power_up_down_info(void)
+{
+ return 4095;
+
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c
new file mode 100644
index 00000000000..cf95b8ae09e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#if USING_MALI_PMM
+
+#include "mali_pmm.h"
+
+/* Internal test on/off */
+#define PMU_TEST 0
+
+/** @brief PMU hardware info
+ */
+typedef struct platform_pmu
+{
+ u32 reg_base_addr; /**< PMU registers base address */
+ u32 reg_size; /**< PMU registers size */
+ const char *name; /**< PMU name */
+ u32 irq_num; /**< PMU irq number */
+
+ mali_io_address reg_mapped; /**< IO-mapped pointer to registers */
+} platform_pmu_t;
+
+static platform_pmu_t *pmu_info = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /**< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /**< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /**< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /**< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /**< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_STAT = 0x14, /**< Interrupt status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /**< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /**< Software delay register */
+ PMU_REG_ADDR_MGMT_MASTER_PWR_UP = 0x24, /**< Master power up register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /**< Size of register space */
+} pmu_reg_addr_mgmt_addr;
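+
+/*
+ * Register protocol sketch (polling mode, as used by mali_platform_powerdown()
+ * further down; the 10 ms poll budget is that function's choice, not a
+ * documented hardware limit):
+ *
+ *   pmu_reg_write(pmu, (u32)PMU_REG_ADDR_MGMT_POWER_DOWN, core_mask);
+ *   while (timeout--) {
+ *       stat = pmu_reg_read(pmu, (u32)PMU_REG_ADDR_MGMT_STATUS) & core_mask;
+ *       if (stat == core_mask) break;        // requested cores report sleeping
+ *       _mali_osk_time_ubusydelay(1000);     // 1 ms
+ *   }
+ */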
+
+/* Internal functions */
+u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address);
+void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val);
+mali_pmm_core_mask pmu_translate_cores_to_pmu(mali_pmm_core_mask cores);
+#if PMU_TEST
+void pmm_pmu_dump_regs( platform_pmu_t *pmu );
+void pmm_pmu_test( platform_pmu_t *pmu, u32 cores );
+#endif
+
+#endif /* USING_MALI_PMM */
+
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+#if USING_MALI_PMM
+ if( resource == NULL )
+ {
+ /* Nothing to set up for the system */
+ }
+ else if( resource->type == PMU )
+ {
+ if( (resource->base == 0) ||
+ (resource->description == NULL) )
+ {
+ /* NOTE: We currently don't care about any other resource settings */
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Missing PMU set up information\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_ASSERT( pmu_info == NULL );
+ pmu_info = (platform_pmu_t *)_mali_osk_malloc(sizeof(*pmu_info));
+ MALI_CHECK_NON_NULL( pmu_info, _MALI_OSK_ERR_NOMEM );
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pmu_info, 0, sizeof(*pmu_info));
+
+ pmu_info->reg_base_addr = resource->base;
+ pmu_info->reg_size = (u32)PMU_REGISTER_ADDRESS_SPACE_SIZE;
+ pmu_info->name = resource->description;
+ pmu_info->irq_num = resource->irq;
+
+ if( _MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name) )
+ {
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not request register region (0x%08X - 0x%08X) for %s\n",
+ pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+ goto cleanup;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: request_mem_region: (0x%08X - 0x%08X) for %s\n",
+ pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+ }
+
+ pmu_info->reg_mapped = _mali_osk_mem_mapioregion( pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name );
+
+ if( 0 == pmu_info->reg_mapped )
+ {
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not ioremap registers for %s .\n", pmu_info->name));
+ _mali_osk_mem_unreqregion( pmu_info->reg_base_addr, pmu_info->reg_size );
+ goto cleanup;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) for %s\n",
+ (u32) pmu_info->reg_mapped,
+ ((u32)pmu_info->reg_mapped)+ pmu_info->reg_size - 1,
+ pmu_info->name));
+ }
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: Mapping registers to %s\n", pmu_info->name));
+
+#if PMU_TEST
+ pmm_pmu_test(pmu_info, (MALI_PMM_CORE_GP));
+ pmm_pmu_test(pmu_info, (MALI_PMM_CORE_GP|MALI_PMM_CORE_L2|MALI_PMM_CORE_PP0));
+#endif
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Initialized - %s\n", pmu_info->name) );
+ }
+ else
+ {
+ /* Didn't expect a different resource */
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_SUCCESS;
+
+cleanup:
+ _mali_osk_free(pmu_info);
+ pmu_info = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+#else
+ /* Nothing to do when not using PMM - Mali is already powered on */
+ MALI_SUCCESS;
+#endif
+
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+#if USING_MALI_PMM
+ if( type == NULL )
+ {
+ /* Nothing to tear down for the system */
+ }
+ else if (*type == PMU)
+ {
+ if( pmu_info )
+ {
+ _mali_osk_mem_unmapioregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->reg_mapped);
+ _mali_osk_mem_unreqregion(pmu_info->reg_base_addr, pmu_info->reg_size);
+ _mali_osk_free(pmu_info);
+ pmu_info = NULL;
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Terminated PMU\n") );
+ }
+ }
+ else
+ {
+ /* Didn't expect a different resource */
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+#if USING_MALI_PMM
+ u32 stat;
+ u32 timeout;
+ u32 cores_pmu;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu_info);
+ MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power down (0x%x)\n", cores) );
+
+ cores_pmu = pmu_translate_cores_to_pmu(cores);
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_DOWN, cores_pmu );
+
+ /* Wait for cores to be powered down */
+ timeout = 10; /* 10ms */
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ stat &= cores_pmu;
+ if( stat == cores_pmu ) break; /* All cores we wanted are now asleep */
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+#if USING_MALI_PMM
+ u32 cores_pmu;
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu_info);
+ MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power up (0x%x)\n", cores) );
+
+ /* Don't use interrupts - just poll status */
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_INT_MASK, 0 );
+ cores_pmu = pmu_translate_cores_to_pmu(cores);
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_UP, cores_pmu );
+
+ timeout = 10; /* 10ms */
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ stat &= cores_pmu;
+ if( stat == 0 ) break; /* All cores we wanted are now awake */
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if USING_MALI_PMM
+
+/***** INTERNAL *****/
+
+/** @brief Internal PMU function to translate the cores bit mask
+ * into something the hardware PMU understands
+ *
+ * @param cores PMM cores bitmask
+ * @return PMU hardware cores bitmask
+ */
+u32 pmu_translate_cores_to_pmu(mali_pmm_core_mask cores)
+{
+ /* For Mali 400 PMU the cores mask is already the same as what
+ * the hardware PMU expects.
+ * For other hardware, some translation can be done here, by
+ * translating the MALI_PMM_CORE_* bits into specific hardware
+ * bits
+ */
+ return cores;
+}
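
The comment above leaves the per-SoC translation as an exercise. As a purely illustrative sketch (the hardware bit positions below are hypothetical; MALI_PMM_CORE_GP, MALI_PMM_CORE_L2 and MALI_PMM_CORE_PP0 are the PMM masks already used by the PMU_TEST calls earlier in this file), an alternative body of pmu_translate_cores_to_pmu() for such hardware could look like:

    u32 pmu_translate_cores_to_pmu(mali_pmm_core_mask cores)
    {
        u32 hw_mask = 0;

        /* Hypothetical hardware layout: GP in bit 0, L2 cache in bit 1, PP0 in bit 2 */
        if (cores & MALI_PMM_CORE_GP)  hw_mask |= (1 << 0);
        if (cores & MALI_PMM_CORE_L2)  hw_mask |= (1 << 1);
        if (cores & MALI_PMM_CORE_PP0) hw_mask |= (1 << 2);

        return hw_mask;
    }
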
+
+/** @brief Internal PMU function to read a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to read from
+ * @return 32-bit value that was read from the address
+ */
+u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address)
+{
+ u32 read_val;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+ MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+ read_val = _mali_osk_mem_ioread32(pmu->reg_mapped, relative_address);
+
+ MALI_DEBUG_PRINT( 5, ("PMU: reg_read: %s Addr:0x%04X Val:0x%08x\n",
+ pmu->name, relative_address, read_val));
+
+ return read_val;
+}
+
+/** @brief Internal PMU function to write to a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to write to
+ * @param new_val new 32-bit value to write into the address
+ */
+void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+ MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+ MALI_DEBUG_PRINT( 5, ("PMU: reg_write: %s Addr:0x%04X Val:0x%08x\n",
+ pmu->name, relative_address, new_val));
+
+ _mali_osk_mem_iowrite32(pmu->reg_mapped, relative_address, new_val);
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+u32 pmu_get_power_up_down_info(void)
+{
+ return pmu_reg_read(pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS);
+}
+
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+#endif /* USING_MALI_PMM */
+
+
+#if USING_MALI_PMM && PMU_TEST
+
+/***** TEST *****/
+
+void pmu_dump_regs( platform_pmu_t *pmu )
+{
+ u32 addr;
+ for( addr = 0x0; addr < PMU_REGISTER_ADDRESS_SPACE_SIZE; addr += 0x4 )
+ {
+ MALI_PRINT( ("PMU_REG: 0x%08x: 0x%04x\n", (addr + pmu->reg_base_addr), pmu_reg_read( pmu, addr ) ) );
+ }
+}
+
+/* This function is an internal test for the PMU without any Mali h/w interaction */
+void pmu_test( platform_pmu_t *pmu, u32 cores )
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_PRINT( ("PMU_TEST: Start\n") );
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Power down cores: 0x%x\n", cores) );
+ _mali_pmm_pmu_power_down( pmu, cores, MALI_TRUE );
+
+ stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == cores ? "SUCCESS" : "FAIL" ) );
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Power up cores: 0x%x\n", cores) );
+ _mali_pmm_pmu_power_up( pmu, cores, MALI_FALSE );
+
+ MALI_PRINT( ("PMU_TEST: Waiting for power up...\n") );
+ timeout = 1000; /* 1 sec */
+ while( !_mali_pmm_pmu_irq_power_up(pmu) && timeout > 0 )
+ {
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ }
+
+ MALI_PRINT( ("PMU_TEST: Waited %dms for interrupt\n", (1000-timeout)) );
+ stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == 0 ? "SUCCESS" : "FAIL" ) );
+
+ _mali_pmm_pmu_irq_power_up_clear(pmu);
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Finish\n") );
+}
+#endif /* USING_MALI_PMM && PMU_TEST */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h
new file mode 100644
index 00000000000..575c1fb6113
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif
+
+#if !USING_MALI_PMM
+/* @brief System power up/down cores that can be passed into mali_platform_powerdown/up() */
+#define MALI_PLATFORM_SYSTEM 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ * When using PMM, it is also called from the PMM start up to initialise the
+ * system PMU
+ *
+ * @param resource This is NULL when called on first driver start up, else it will
+ * be a pointer to a PMU resource
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ * When using PMM, it is also called from the PMM termination code to clean up the
+ * system PMU
+ *
+ * @param type This is NULL when called on driver exit, else it will
+ * be a pointer to a PMU resource type (not the full resource)
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Called as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power down based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores);
+
+/** @brief Platform specific powerup sequence of MALI
+ *
+ * Called as part of platform deinit if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power up based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_powerup(u32 cores);
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(u32 utilization);
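
Since the handler receives utilization as a fraction of 256 rather than a percentage, the value can be rescaled to an approximate percentage for logging or threshold tuning. A minimal sketch (the variable name is illustrative only):

    /* utilization is in the range 0..256, where 256 means fully loaded */
    u32 percent = (utilization * 100) / 256;   /* e.g. 64 -> 25%, 192 -> 75% */
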
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to get the power up/down status of the individual cores
+ *
+ * This function is used by the power management test suite to read which
+ * cores are currently powered up or down.
+ * @return The value of the PMU status register, reflecting the power state of the cores.
+ */
+u32 pmu_get_power_up_down_info(void);
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c
new file mode 100644
index 00000000000..481e6e76c54
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Mali related Ux500 platform initialization
+ *
+ * Author: Marta Lofstedt <marta.lofstedt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for ST-Ericsson's Ux500 platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT 64
+#define MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT 192
+
+static int is_running;
+static struct regulator *regulator;
+static struct clk *clk_sga;
+static u32 last_utilization;
+static struct work_struct mali_utilization_work;
+static struct workqueue_struct *mali_utilization_workqueue;
+
+/* Rationale behind the values for:
+* MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT and MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT
+* When operating at half clock frequency a faster clock is requested when
+* reaching 75% utilization. When operating at full clock frequency a slower
+* clock is requested when reaching 25% utilization. There is a margin of 25%
+* at the high range of the slow clock to avoid complete saturation of the
+* hardware and there is some overlap to avoid an oscillating situation where
+* the clock goes back and forth from high to low.
+*
+* Utilization on full speed clock
+* 0 64 128 192 255
+* |---------------|---------------|---------------|---------------|
+* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+* | ^
+* V |
+* XXXXXXXXXXXXXXXXXXXXXXXXX
+* 0 64 128 192 255
+* |-------|-------|-------|-------|
+* Utilization on half speed clock
+*/
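
In other words, the two limits defined above are simply the 25% and 75% switch points expressed on the driver's 0-256 utilization scale:

    0.25 * 256 =  64  ->  MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT
    0.75 * 256 = 192  ->  MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT
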
+void mali_utilization_function(struct work_struct *ptr)
+{
+ /* By default, the platform starts with 50% APE OPP and 25% DDR OPP */
+ static u32 has_requested_low = 1;
+
+ if (last_utilization > MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT) {
+ if (has_requested_low) {
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_HIGH\n", last_utilization));
+ /*Request 100% APE_OPP.*/
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mali", 100)) {
+ MALI_DEBUG_PRINT(2, ("MALI 100% APE_OPP failed\n"));
+ return;
+ }
+ /*
+ * Since the utilization values will be reported higher
+ * if DDR_OPP is lowered, we also request 100% DDR_OPP.
+ */
+ if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, "mali", 100)) {
+ MALI_DEBUG_PRINT(2, ("MALI 100% DDR_OPP failed\n"));
+ return;
+ }
+ has_requested_low = 0;
+ }
+ } else {
+ if (last_utilization < MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT) {
+ if (!has_requested_low) {
+ /*Remove APE_OPP and DDR_OPP requests*/
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mali");
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP, "mali");
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_LOW\n", last_utilization));
+ has_requested_low = 1;
+ }
+ }
+ }
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u\n", last_utilization));
+}
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ is_running = 0;
+ last_utilization = 0;
+
+ mali_utilization_workqueue = create_singlethread_workqueue("mali_utilization_workqueue");
+ if (NULL == mali_utilization_workqueue) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to setup workqueue %s\n", __func__, "v-mali"));
+ goto error;
+ }
+ INIT_WORK(&mali_utilization_work, mali_utilization_function);
+
+ regulator = regulator_get(NULL, "v-mali");
+ if (IS_ERR(regulator)) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to get regulator %s\n", __func__, "v-mali"));
+ goto error;
+ }
+
+ clk_sga = clk_get_sys("mali", NULL);
+ if (IS_ERR(clk_sga)) {
+ regulator_put(regulator);
+ MALI_DEBUG_PRINT(2, ("%s: Failed to get clock %s\n", __func__, "mali"));
+ goto error;
+ }
+
+ MALI_SUCCESS;
+error:
+ MALI_DEBUG_PRINT(1, ("SGA initialization failed.\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ destroy_workqueue(mali_utilization_workqueue);
+ regulator_put(regulator);
+ clk_put(clk_sga);
+ MALI_DEBUG_PRINT(2, ("SGA terminated.\n"));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ if (is_running) {
+ clk_disable(clk_sga);
+ if (regulator) {
+ int ret = regulator_disable(regulator);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to disable regulator %s\n", __func__, "v-mali"));
+ is_running = 0;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ is_running = 0;
+ }
+ MALI_DEBUG_PRINT(4, ("mali_platform_powerdown is_running: %u cores: %u\n", is_running, cores));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ if (!is_running) {
+ int ret = regulator_enable(regulator);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to enable regulator %s\n", __func__, "v-mali"));
+ goto error;
+ }
+
+ ret = clk_enable(clk_sga);
+ if (ret < 0) {
+ regulator_disable(regulator);
+ MALI_DEBUG_PRINT(2, ("%s: Failed to enable clock %s\n", __func__, "mali"));
+ goto error;
+ }
+ is_running = 1;
+ }
+ MALI_DEBUG_PRINT(4, ("mali_platform_powerup is_running:%u cores: %u\n", is_running, cores));
+ MALI_SUCCESS;
+error:
+ MALI_DEBUG_PRINT(1, ("Failed to power up.\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+ last_utilization = utilization;
+ /*
+ * We deliberately do not cancel a potentially pending piece of work in
+ * favor of the new one. Since last_utilization is updated before the work
+ * is queued, mali_utilization_function will evaluate the current
+ * utilization, not the value from when the work was scheduled.
+ */
+ queue_work(mali_utilization_workqueue, &mali_utilization_work);
+}
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c
new file mode 100644
index 00000000000..b321b504839
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "ump_kernel_types.h"
+#include "mali_kernel_common.h"
+
+#include <linux/hwmem.h>
+#include <linux/err.h>
+
+
+/* The UMP kernel API for hwmem has been mapped so that
+ * ump_dd_handle == hwmem_alloc
+ * ump_secure_id == hwmem global name
+ *
+ * The current implementation is limited to contiguous memory
+ */
+
+ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ int hwmem_name = hwmem_get_name((struct hwmem_alloc *) memh);
+
+ if (unlikely(hwmem_name < 0)) {
+ MALI_DEBUG_PRINT(1, ("%s: Invalid Alloc 0x%x\n",__func__, memh));
+ return UMP_INVALID_SECURE_ID;
+ }
+
+ return (ump_secure_id)hwmem_name;
+}
+
+
+
+ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name((int) secure_id);
+
+ if (IS_ERR(alloc)) {
+ MALI_DEBUG_PRINT(1, ("%s: Invalid UMP id %d\n",__func__, secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ hwmem_get_info(alloc, NULL, &mem_type, &access);
+
+ if (unlikely((access & (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT)) !=
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT))) {
+ MALI_DEBUG_PRINT(1, ("%s: Access denied on UMP id %d, (access==%d)\n",
+ __func__, secure_id, access));
+ hwmem_release(alloc);
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (unlikely(HWMEM_MEM_CONTIGUOUS_SYS != mem_type)) {
+ MALI_DEBUG_PRINT(1, ("%s: UMP id %d is non-contiguous! (not supported)\n",
+ __func__, secure_id));
+ hwmem_release(alloc);
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ return (ump_dd_handle)alloc;
+}
+
+
+
+unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ return 1;
+}
+
+
+
+ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh,
+ ump_dd_physical_block * blocks,
+ unsigned long num_blocks)
+{
+ struct hwmem_mem_chunk hwmem_mem_chunk;
+ size_t hwmem_mem_chunk_length = 1;
+
+ int hwmem_result;
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+
+ if (unlikely(blocks == NULL)) {
+ MALI_DEBUG_PRINT(1, ("%s: blocks == NULL\n",__func__));
+ return UMP_DD_INVALID;
+ }
+
+ if (unlikely(1 != num_blocks)) {
+ MALI_DEBUG_PRINT(1, ("%s: num_blocks == %d (!= 1)\n",__func__, num_blocks));
+ return UMP_DD_INVALID;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Returning physical block information. Alloc: 0x%x\n", memh));
+
+ /* It might not look natural to pin here, but it matches the usage by the mali kernel module */
+ hwmem_result = hwmem_pin(alloc, &hwmem_mem_chunk, &hwmem_mem_chunk_length);
+
+ if (unlikely(hwmem_result < 0)) {
+ MALI_DEBUG_PRINT(1, ("%s: Pin failed. Alloc: 0x%x\n",__func__, memh));
+ return UMP_DD_INVALID;
+ }
+
+ blocks[0].addr = hwmem_mem_chunk.paddr;
+ blocks[0].size = hwmem_mem_chunk.size;
+
+ hwmem_set_domain(alloc, HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_SYNC, NULL);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh,
+ unsigned long index,
+ ump_dd_physical_block * block)
+{
+ if (unlikely(0 != index)) {
+ MALI_DEBUG_PRINT(1, ("%s: index == %d (!= 0)\n",__func__, index));
+ return UMP_DD_INVALID;
+ }
+ return ump_dd_phys_blocks_get(memh, block, 1);
+}
+
+
+
+unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+ int size;
+
+ hwmem_get_info(alloc, &size, NULL, NULL);
+
+ return size;
+}
+
+
+
+void ump_dd_reference_add(ump_dd_handle memh)
+{
+ /* This is never called from the mali kernel driver */
+}
+
+
+
+void ump_dd_reference_release(ump_dd_handle memh)
+{
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+
+ hwmem_unpin(alloc);
+ hwmem_release(alloc);
+
+ return;
+}
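
A minimal usage sketch of this hwmem-backed API, following the handle/secure-id mapping described at the top of the file (the secure id is assumed to come from ump_dd_secure_id_get() on the exporting side; the surrounding driver code and hardware programming are placeholders, not part of this file):

    ump_dd_physical_block block;
    ump_dd_handle imported;

    imported = ump_dd_handle_create_from_secure_id(id);
    if (imported != UMP_DD_HANDLE_INVALID) {
        /* pins the underlying hwmem allocation and returns its single contiguous block */
        if (UMP_DD_SUCCESS == ump_dd_phys_blocks_get(imported, &block, 1)) {
            /* ... program block.addr / block.size into the hardware ... */
        }
        /* drops the pin and the hwmem reference taken above */
        ump_dd_reference_release(imported);
    }
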
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt
new file mode 100644
index 00000000000..af40fddd20f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_MMU=<mmu_option> USING_UMP=<ump_option> \
+BUILD=<build_option> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ mmu_option: 1 = Mali MMU(s) on
+ 0 = Mali MMU(s) off
+ ump_option: 1 = Enable UMP support(*)
+ 0 = Disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+ your_config: Name of the sub-folder to find the required config.h(**) file
+ ("arch-" will be prepended)
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+(**) The config.h file contains the required configuration parameters, such as where
+ the Mali GPU is located, which interrupts it uses, its memory setup, and so on.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
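
For example, a debug build with the MMU and UMP support enabled, against a kernel tree
at a hypothetical path and assuming an arch-ux500/config.h exists, would be invoked as:

KDIR=/path/to/linux-kernel USING_MMU=1 USING_UMP=1 \
BUILD=debug CONFIG=ux500 make
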
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h
new file mode 100644
index 00000000000..6b30802bb2d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1<<0),
+#if defined(USING_MALI200)
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1<<3),
+#endif
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1<<5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1<<6),
+#if defined(USING_MALI400)
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1<<7),
+#endif
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1<<0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1<<1),
+ MALI200_REG_VAL_IRQ_HANG = (1<<2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1<<3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1<<4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1<<5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1<<6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1<<7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1<<8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1<<10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1<<11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1<<12),
+};
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+#else
+#error "No supported mali core defined"
+#endif
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+#else
+#error "No supported mali core defined"
+#endif
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1<<0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1<<4),
+};
+
+enum mali200_render_unit
+{
+ MALI200_REG_ADDR_FRAME = 0x0000,
+};
+
+#if defined USING_MALI200
+#define MALI200_NUM_REGS_FRAME ((0x04C/4)+1)
+#elif defined USING_MALI400
+#define MALI200_NUM_REGS_FRAME ((0x058/4)+1)
+#else
+#error "No supported mali core defined"
+#endif
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+/** The number of registers in one single writeback unit */
+#ifndef MALI200_NUM_REGS_WBx
+#define MALI200_NUM_REGS_WBx ((0x02C/4)+1)
+#endif
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#if defined USING_MALI200
+#define MALI_PP_PRODUCT_ID 0xC807
+#elif defined USING_MALI400
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI_PP_PRODUCT_ID MALI400_PP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+
+#endif /* _MALI200_REGS_H_ */
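
As a sketch of how the product ID macros above are meant to be used (the register read helper is hypothetical; only the right-shift by 16 follows from the "top 16 bit" comment in this header):

    /* The product ID sits in the top 16 bits of the PP version register */
    u32 version = read_pp_reg(MALI200_REG_ADDR_MGMT_VERSION);  /* hypothetical accessor */
    if ((version >> 16) == MALI_PP_PRODUCT_ID) {
        /* the core at this base address is the expected Mali PP */
    }
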
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h
new file mode 100644
index 00000000000..11eb55c424e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * They are used to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are given as byte offsets to the 32-bit registers.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum
+{
+ MALIGP2_REG_VAL_CMD_START_VS = (1<< 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1<< 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1<< 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1<< 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1<< 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1<< 9),
+#if defined(USING_MALI400)
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1<<10),
+#endif
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#if defined USING_MALI400
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+#elif !defined USING_MALI200
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining all IRQs in MaliGP2 */
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining the IRQs in MaliGP2 which we use*/
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} end of MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} end of MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#if defined(USING_MALI200)
+#define MALI_GP_PRODUCT_ID 0xA07
+#elif defined(USING_MALI400)
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI_GP_PRODUCT_ID MALI400_GP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+/**
+ * The different sources that can be instrumented on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 00000000000..a6b1d76d567
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 00000000000..96b709bc166
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__ ("MCR p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__ ("MRC p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
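
A minimal sketch of how these two inline helpers combine to time a piece of work in CPU clock cycles on this ARM11 backend (the work being measured is a placeholder):

    u64 start, elapsed;

    _mali_timestamp_reset();                  /* reset and enable the cycle counter */
    start = _mali_timestamp_get();

    /* ... work being measured ... */

    elapsed = _mali_timestamp_get() - start;  /* elapsed processor clock cycles */
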
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c
new file mode 100644
index 00000000000..a6b1d76d567
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h
new file mode 100644
index 00000000000..11031d675ca
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile
new file mode 100644
index 00000000000..97225bbbf0b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile
@@ -0,0 +1,115 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+UMP_FILE_PREFIX =
+UDD_FILE_PREFIX = ../mali/
+
+ifneq ($(KBUILD_EXTMOD),)
+include $(KBUILD_EXTMOD)/Makefile.common
+else
+include ./Makefile.common
+endif
+
+# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH
+
+ARCH ?= arm
+## @note Should allow overriding of building UMP for non-debug:
+EXTRA_CFLAGS += -DDEBUG -DMALI_STATE_TRACKING=0
+
+# linux build system integration
+
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/common -I$(KBUILD_EXTMOD)/linux -I$(KBUILD_EXTMOD)/../mali/common -I$(KBUILD_EXTMOD)/../mali/linux -I$(KBUILD_EXTMOD)/../../ump/include/ump
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(KBUILD_EXTMOD)/linux/license/gpl/*),)
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/linux/license/proprietary
+else
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/linux/license/gpl
+endif
+
+SRC += $(UMP_FILE_PREFIX)linux/ump_kernel_linux.c \
+ $(UMP_FILE_PREFIX)linux/ump_kernel_memory_backend_os.c \
+ $(UMP_FILE_PREFIX)linux/ump_kernel_memory_backend_dedicated.c \
+ $(UMP_FILE_PREFIX)linux/ump_memory_backend.c \
+ $(UMP_FILE_PREFIX)linux/ump_ukk_wrappers.c \
+ $(UMP_FILE_PREFIX)linux/ump_ukk_ref_wrappers.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_atomics.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_low_level_mem.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_misc.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_atomics.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_locks.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_memory.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_math.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_misc.c
+
+# Selecting files to compile by parsing the config file
+
+MODULE:=ump.ko
+
+obj-m := $(MODULE:.ko=.o)
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ # default to Virtex5
+ CONFIG ?= pb-virtex5
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= ct11mp
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d arch-$(CONFIG) ] && [ -f arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L arch ] && rm arch)
+$(shell ln -sf arch-$(CONFIG) arch)
+$(shell touch arch/config.h)
+endif
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR)/../mali clean
+
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common
new file mode 100644
index 00000000000..c6aa6337f80
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_descriptor_mapping.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_api.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_ref_drv.c
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h
new file mode 100644
index 00000000000..38ae1eeb3c2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xCE000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 32UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c
new file mode 100644
index 00000000000..719370c3de7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+
+
+
+/* ---------------- UMP kernel space API functions follows ---------------- */
+
+
+
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+ return mem->secure_id;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ ump_dd_mem * mem;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+ if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ ump_dd_reference_add(mem);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return (ump_dd_handle)mem;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*) memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ return mem->nr_blocks;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (blocks == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (mem->nr_blocks != num_blocks)
+ {
+ DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+ _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (block == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (index >= mem->nr_blocks)
+ {
+ DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+ *block = mem->block_array[index];
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return mem->size_bytes;
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+ int new_ref;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+ DBG_MSG(4, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{
+ int new_ref;
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ /* We must hold this mutex while doing the atomic_dec_and_read, to ensure
+ that elements in the ump_descriptor_mapping table are always valid. If they
+ are not, userspace may accidentally map in a secure_id right before it is freed,
+ giving a mapped backdoor into unallocated memory. */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+
+ DBG_MSG(4, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+
+ if (0 == new_ref)
+ {
+ DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+ ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ mem->release_func(mem->ctx, mem);
+ _mali_osk_free(mem);
+ }
+ else
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+{
+ ump_session_data * session_data;
+
+ DEBUG_ASSERT_POINTER( args );
+ DEBUG_ASSERT_POINTER( args->ctx );
+
+ session_data = (ump_session_data *)args->ctx;
+
+ /* check compatibility */
+ if (args->version == UMP_IOCTL_API_VERSION)
+ {
+ DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else if (args->version == MAKE_VERSION_ID(1))
+ {
+ DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else
+ {
+ DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ args->compatible = 0;
+ args->version = UMP_IOCTL_API_VERSION; /* report our version */
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+{
+ ump_session_memory_list_element * session_memory_element;
+ ump_session_memory_list_element * tmp;
+ ump_session_data * session_data;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+ int secure_id;
+
+ DEBUG_ASSERT_POINTER( release_info );
+ DEBUG_ASSERT_POINTER( release_info->ctx );
+
+ /* Retrieve the session data */
+ session_data = (ump_session_data*)release_info->ctx;
+
+ /* If there are many items in the memory session list we
+ * could be de-referencing this pointer a lot so keep a local copy
+ */
+ secure_id = release_info->secure_id;
+
+ DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+ /* Iterate through the memory list looking for the requested secure ID */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ if ( session_memory_element->mem->secure_id == secure_id)
+ {
+ ump_dd_mem *release_mem;
+
+ release_mem = session_memory_element->mem;
+ _mali_osk_list_del(&session_memory_element->list);
+ ump_dd_reference_release(release_mem);
+ _mali_osk_free(session_memory_element);
+
+ ret = _MALI_OSK_ERR_OK;
+ break;
+ }
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+ DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+ return ret;
+}
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+{
+ ump_dd_mem * mem;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+
+ /* We lock the mappings so things don't get removed while we are looking for the memory */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
+ {
+ user_interaction->size = mem->size_bytes;
+ DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+ ret = _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ user_interaction->size = 0;
+ DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+ }
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+
+
+void _ump_ukk_msync( _ump_uk_msync_s *args )
+{
+ ump_dd_mem * mem = NULL;
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (NULL==mem)
+ {
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ /* Return the cache setting back to user space */
+ args->is_cached=mem->is_cached;
+
+ /* If this flag is the only one set, we should not do the actual flush, only the readout */
+ if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+ return;
+ }
+
+ /* Nothing to do if the memory is not cached */
+ if ( 0==mem->is_cached )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+ return;
+ }
+ DBG_MSG(3, ("_ump_ukk_msync FLUSHING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+
+ /* The actual cache flush - implemented for each OS */
+ _ump_osk_msync( mem, args->op );
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c
new file mode 100644
index 00000000000..b99c3e7c7d2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum size of number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128)
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096)
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* Perform OS Specific initialization */
+ err = _ump_osk_init();
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+ return err;
+ }
+
+ /* Init the global device */
+ _mali_osk_memset(&device, 0, sizeof(device) );
+
+ /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+ device.secure_id_map_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0 , 0);
+ if (NULL == device.secure_id_map_lock)
+ {
+ MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+ if (NULL == device.secure_id_map)
+ {
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ MSG_ERR(("Failed to create secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Init memory backend */
+ device.backend = ump_memory_backend_create();
+ if (NULL == device.backend)
+ {
+ MSG_ERR(("Failed to create memory backend\n"));
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void ump_kernel_destructor(void)
+{
+ DEBUG_ASSERT_POINTER(device.secure_id_map);
+ DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
+
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ device.secure_id_map_lock = NULL;
+
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ device.secure_id_map = NULL;
+
+ device.backend->shutdown(device.backend);
+ device.backend = NULL;
+
+ ump_memory_backend_destroy();
+
+ _ump_osk_term();
+}
+
+/** Creates a new UMP session
+ */
+_mali_osk_errcode_t _ump_ukk_open( void** context )
+{
+ struct ump_session_data * session_data;
+
+ /* allocate a struct to track this session */
+ session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ if( NULL == session_data->lock )
+ {
+ MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+
+ if ( NULL == session_data->cookies_map )
+ {
+ MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+ _mali_osk_lock_term( session_data->lock );
+ _mali_osk_free( session_data );
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+ /* Since the initial version of the UMP interface did not use the API_VERSION ioctl, we have to
+ assume that version here rather than the "latest" one (UMP_IOCTL_API_VERSION).
+ Clients on the current or later API versions make an additional call to this IOCTL, which
+ updates this variable to the correct value. */
+ session_data->api_version = MAKE_VERSION_ID(1);
+
+ *context = (void*)session_data;
+
+ DBG_MSG(2, ("New session opened\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_close( void** context )
+{
+ struct ump_session_data * session_data;
+ ump_session_memory_list_element * item;
+ ump_session_memory_list_element * tmp;
+
+ session_data = (struct ump_session_data *)*context;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Unmap any descriptors mapped in. */
+ if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list))
+ {
+ ump_memory_allocation *descriptor;
+ ump_memory_allocation *temp;
+
+ DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list)
+ {
+ _ump_uk_unmap_mem_s unmap_args;
+ DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void*)session_data;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.size = descriptor->size;
+ unmap_args._ukk_private = NULL; /* NOTE: unused */
+ unmap_args.cookie = descriptor->cookie;
+
+ /* NOTE: This modifies the list_head_session_memory_mappings_list */
+ _ump_ukk_unmap_mem( &unmap_args );
+ }
+ }
+
+ /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+ * can fail silently. */
+ DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+
+ _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ _mali_osk_list_del(&item->list);
+ DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+ ump_dd_reference_release(item->mem);
+ _mali_osk_free(item);
+ }
+
+ ump_descriptor_mapping_destroy( session_data->cookies_map );
+
+ _mali_osk_lock_term(session_data->lock);
+ _mali_osk_free(session_data);
+
+ DBG_MSG(2, ("Session closed\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor; /* Describes current mapping of memory */
+ _mali_osk_errcode_t err;
+ unsigned long offset = 0;
+ unsigned long left;
+ ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
+ ump_dd_mem * mem; /* The real UMP memory. It is the same pointer as the handle, but with the struct exposed */
+ u32 block;
+ int map_id;
+
+ session_data = (ump_session_data *)args->ctx;
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+ if (NULL == descriptor)
+ {
+ MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ _mali_osk_free(descriptor);
+ DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mem = (ump_dd_mem*)handle;
+ DEBUG_ASSERT(mem);
+ if (mem->size_bytes != args->size)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+
+ if (map_id < 0)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ descriptor->size = args->size;
+ descriptor->handle = handle;
+ descriptor->phys_addr = args->phys_addr;
+ descriptor->process_mapping_info = args->_ukk_private;
+ descriptor->ump_session = session_data;
+ descriptor->cookie = (u32)map_id;
+
+ if ( mem->is_cached )
+ {
+ descriptor->is_cached = 1;
+ args->is_cached = 1;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+ }
+ else
+ {
+ descriptor->is_cached = 0;
+ args->is_cached = 0;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
+ }
+
+ _mali_osk_list_init( &descriptor->list );
+
+ err = _ump_osk_mem_mapregion_init( descriptor );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(mem);
+ return err;
+ }
+
+ DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
+
+ left = descriptor->size;
+ /* loop over all blocks and map them in */
+ for (block = 0; block < mem->nr_blocks; block++)
+ {
+ unsigned long size_to_map;
+
+ if (left > mem->block_array[block].size)
+ {
+ size_to_map = mem->block_array[block].size;
+ }
+ else
+ {
+ size_to_map = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) )
+ {
+ DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_dd_reference_release(mem);
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ left -= size_to_map;
+ offset += size_to_map;
+ }
+
+ /* Add to the ump_memory_allocation tracking list */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = descriptor->cookie;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor;
+ ump_dd_handle handle;
+
+ session_data = (ump_session_data *)args->ctx;
+
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) )
+ {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+ return;
+ }
+
+ DEBUG_ASSERT_POINTER(descriptor);
+
+ handle = descriptor->handle;
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+ return;
+ }
+
+ /* Remove the ump_memory_allocation from the list of tracked mappings */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_del( &descriptor->list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+
+ ump_dd_reference_release(handle);
+
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h
new file mode 100644
index 00000000000..0c55b14bc94
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+ extern int ump_debug_level;
+ #define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+ #define UMP_DEBUG_CODE(args) args
+ #define DBG_MSG(level,args) do { /* args should be in brackets */ \
+ ((level) <= ump_debug_level)?\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+ UMP_DEBUG_PRINT(args):0; \
+ } while (0)
+
+ #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+ if((condition)&&((level) <= ump_debug_level)) {\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+ else if((level) <= ump_debug_level) { \
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+ #define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+ #define UMP_DEBUG_PRINT(args) do {} while(0)
+ #define UMP_DEBUG_CODE(args)
+ #define DBG_MSG(level,args) do {} while(0)
+ #define DBG_MSG_IF(level,condition,args) do {} while(0)
+ #define DBG_MSG_ELSE(level,args) do {} while(0)
+ #define DEBUG_ASSERT(condition) do {} while(0)
+ #define DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#endif /* DEBUG */
+
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
+ } while(0)
+
+#define MSG(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
+
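+/* Usage note (illustrative only, not used by the driver itself): the extra inner
+ * parentheses are required because the macros forward "args" verbatim to
+ * _mali_osk_dbgmsg(). With hypothetical variables secure_id and size_bytes:
+ *
+ *   DBG_MSG(2, ("Mapped ID %u, size %lu\n", secure_id, size_bytes));
+ *   MSG_ERR(("Unexpected NULL descriptor\n"));
+ */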
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data
+{
+ _mali_osk_list_t list_head_session_memory_list; /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+ _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+ int api_version;
+ _mali_osk_lock_t * lock;
+ ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themselves (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element
+{
+ struct ump_dd_mem * mem;
+ _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev
+{
+ _mali_osk_lock_t * secure_id_map_lock;
+ ump_descriptor_mapping * secure_id_map;
+ ump_memory_backend * backend;
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode( _mali_osk_errcode_t err );
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this, so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000..2531f802127
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table * table);
+
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+ map->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0 , 0);
+ if ( NULL != map->lock )
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term( map->lock );
+ _mali_osk_free(map);
+}
+
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+{
+ int descriptor = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (descriptor == map->current_nr_mappings)
+ {
+ int nr_mappings_new;
+ /* no free descriptor, try to expand the table */
+ ump_descriptor_table * new_table;
+ ump_descriptor_table * old_table = map->table;
+ nr_mappings_new = map->current_nr_mappings * 2;
+
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ new_table = descriptor_table_alloc(nr_mappings_new);
+ if (NULL == new_table)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ map->current_nr_mappings = nr_mappings_new;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+ map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ return descriptor;
+}
+
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+{
+ int result = -1;/*-EFAULT;*/
+ DEBUG_ASSERT(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = 0;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+{
+ int result = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = 0;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static ump_descriptor_table * descriptor_table_alloc(int count)
+{
+ ump_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(ump_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000..92bbe54bd35
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ ump_descriptor_table * table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries, growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Maximum number of entries to support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
+
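+/*
+ * Illustrative usage sketch (comment only, not part of the driver). The names
+ * "my_map" and "my_object" are hypothetical; the call sequence mirrors how
+ * ump_kernel_common.c uses device.secure_id_map:
+ *
+ *   ump_descriptor_mapping * my_map = ump_descriptor_mapping_create(128, 4096);
+ *   int id = ump_descriptor_mapping_allocate_mapping(my_map, my_object);
+ *   void * looked_up;
+ *   if (0 == ump_descriptor_mapping_get(my_map, id, &looked_up)) { use looked_up }
+ *   ump_descriptor_mapping_free(my_map, id);
+ *   ump_descriptor_mapping_destroy(my_map);
+ */
+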
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h
new file mode 100644
index 00000000000..02a64707036
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+typedef struct ump_memory_allocation
+{
+ void * phys_addr;
+ void * mapping;
+ unsigned long size;
+ ump_dd_handle handle;
+ void * process_mapping_info;
+ u32 cookie; /**< necessary on some U/K interface implementations */
+ struct ump_session_data * ump_session; /**< Session that this allocation belongs to */
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+ u32 is_cached;
+} ump_memory_allocation;
+
+typedef struct ump_memory_backend
+{
+ int (*allocate)(void* ctx, ump_dd_mem * descriptor);
+ void (*release)(void* ctx, ump_dd_mem * descriptor);
+ void (*shutdown)(struct ump_memory_backend * backend);
+ int (*pre_allocate_physical_check)(void *ctx, u32 size);
+ u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+ void * ctx;
+} ump_memory_backend;
+
+ump_memory_backend * ump_memory_backend_create ( void );
+void ump_memory_backend_destroy( void );
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c
new file mode 100644
index 00000000000..5a997e222a7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define UMP_MINIMUM_SIZE 4096
+#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
+#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
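+/*
+ * Worked example (comment only): with UMP_MINIMUM_SIZE of 4096,
+ * UMP_SIZE_ALIGN(1) == 4096, UMP_SIZE_ALIGN(4096) == 4096 and
+ * UMP_SIZE_ALIGN(4097) == 8192, while UMP_ADDR_ALIGN_OFFSET(0x1010) == 0x10,
+ * i.e. non-zero for any address that is not page aligned.
+ */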
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem;
+ unsigned long size_total = 0;
+ int map_id;
+ u32 i;
+
+ /* Go through the input blocks and verify that they are sane */
+ for (i=0; i < num_blocks; i++)
+ {
+ unsigned long addr = blocks[i].addr;
+ unsigned long size = blocks[i].size;
+
+ DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+ size_total += blocks[i].size;
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
+ {
+ MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(size))
+ {
+ MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+ return UMP_DD_HANDLE_INVALID;
+ }
+ }
+
+ /* Allocate the ump_dd_mem struct for this allocation */
+ mem = _mali_osk_malloc(sizeof(*mem));
+ if (NULL == mem)
+ {
+ DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Find a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Now, make a copy of the block information supplied by the user */
+ mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+ if (NULL == mem->block_array)
+ {
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ _mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+ /* And setup the rest of the ump_dd_mem struct */
+ _mali_osk_atomic_init(&mem->ref_count, 1);
+ mem->secure_id = (ump_secure_id)map_id;
+ mem->size_bytes = size_total;
+ mem->nr_blocks = num_blocks;
+ mem->backend_info = NULL;
+ mem->ctx = NULL;
+ mem->release_func = phys_blocks_release;
+ /* For now, UMP handles created by ump_dd_handle_create_from_phys_blocks() are forced to be uncached */
+ mem->is_cached = 0;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return (ump_dd_handle)mem;
+}
+
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+{
+ _mali_osk_free(descriptor->block_array);
+ descriptor->block_array = NULL;
+}
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+{
+ ump_session_data * session_data = NULL;
+ ump_dd_mem *new_allocation = NULL;
+ ump_session_memory_list_element * session_memory_element = NULL;
+ int map_id;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+ DEBUG_ASSERT_POINTER( user_interaction->ctx );
+
+ session_data = (ump_session_data *) user_interaction->ctx;
+
+ session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+ if (NULL == session_memory_element)
+ {
+ DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+
+ new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
+ if (NULL==new_allocation)
+ {
+ _mali_osk_free(session_memory_element);
+ DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Create a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(session_memory_element);
+ _mali_osk_free(new_allocation);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
+ /* Initialize the part of the new_allocation that we know so far */
+ new_allocation->secure_id = (ump_secure_id)map_id;
+ _mali_osk_atomic_init(&new_allocation->ref_count,1);
+ if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+ new_allocation->is_cached = 0;
+ else new_allocation->is_cached = 1;
+
+ /* Special-case a size of 0: emulate what malloc does in this case, which is to return a valid pointer that must be freed, but can't be dereferenced */
+ if (0 == user_interaction->size)
+ {
+ user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+ }
+
+ new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+
+ /* Now, ask the active memory backend to do the actual memory allocation */
+ if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
+ {
+ DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(new_allocation);
+ _mali_osk_free(session_memory_element);
+ return _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
+ new_allocation->ctx = device.backend->ctx;
+ new_allocation->release_func = device.backend->release;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Initialize the session_memory_element, and add it to the session object */
+ session_memory_element->mem = new_allocation;
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ user_interaction->secure_id = new_allocation->secure_id;
+ user_interaction->size = new_allocation->size_bytes;
+ DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h
new file mode 100644
index 00000000000..dc79b6f289b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem
+{
+ ump_secure_id secure_id;
+ _mali_osk_atomic_t ref_count;
+ unsigned long size_bytes;
+ unsigned long nr_blocks;
+ ump_dd_physical_block * block_array;
+ void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
+ void * ctx;
+ void * backend_info;
+ int is_cached;
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h
new file mode 100644
index 00000000000..73284f02b47
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include <ump_uk_types.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t _ump_osk_init( void );
+
+_mali_osk_errcode_t _ump_osk_term( void );
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h
new file mode 100644
index 00000000000..b08335f61f1
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
+
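+/*
+ * Worked example (comment only): MAKE_VERSION_ID(2) == 0x00020002,
+ * GET_VERSION(0x00020002) == 2 and IS_VERSION_ID(0x00020002) is true,
+ * so IS_API_MATCH(MAKE_VERSION_ID(2), MAKE_VERSION_ID(2)) holds while
+ * IS_API_MATCH(MAKE_VERSION_ID(1), MAKE_VERSION_ID(2)) does not.
+ */
+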
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+}_ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
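+
+/*
+ * Illustrative sketch (comment only, with a hypothetical "args" variable): to
+ * query whether an allocation is CPU cached without flushing it, fill an
+ * _ump_uk_msync_s with the secure_id and op = _UMP_UK_MSYNC_READOUT_CACHE_ENABLED,
+ * call _ump_ukk_msync(&args) and read back args.is_cached. _UMP_UK_MSYNC_CLEAN
+ * and _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE perform the actual cache maintenance.
+ */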
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] contraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void * _ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void * _ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h
new file mode 100644
index 00000000000..a3317fc7b21
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+_mali_osk_errcode_t _ump_ukk_open( void** context );
+
+_mali_osk_errcode_t _ump_ukk_close( void** context );
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+void _ump_ukk_msync( _ump_uk_msync_s *args );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h
new file mode 100644
index 00000000000..17b930d2c57
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h
new file mode 100644
index 00000000000..b11429816ab
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "../common/ump_uk_types.h"
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file ump_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userspace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_size_get_s)
+
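+/*
+ * Illustrative user space sketch (comment only; the "/dev/ump" device node
+ * name is an assumption based on the "ump" character device the driver
+ * registers):
+ *
+ *   int fd = open("/dev/ump", O_RDWR);
+ *   _ump_uk_allocate_s alloc = {0};
+ *   alloc.size = 4096;
+ *   alloc.constraints = UMP_REF_DRV_UK_CONSTRAINT_NONE;
+ *   ioctl(fd, UMP_IOC_ALLOCATE, &alloc);
+ *
+ * On success the driver fills in alloc.secure_id and the page-aligned
+ * alloc.size.
+ */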
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c
new file mode 100644
index 00000000000..76d6b2f913a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory management functions and types */
+#include <asm/uaccess.h> /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include "arch/config.h" /* Configuration for current platform. The symlinc for arch is set by Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call requires non-cost */
+
+
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker
+{
+ int references;
+ ump_dd_handle handle;
+} ump_vma_usage_tracker;
+
+struct ump_device
+{
+ struct cdev cdev;
+#if UMP_LICENSE_IS_GPL
+ struct class * ump_class;
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+
+
+/* This variable defines the file operations this UMP device driver offer */
+static struct file_operations ump_fops =
+{
+ .owner = THIS_MODULE,
+ .open = ump_file_open,
+ .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = ump_file_ioctl,
+#else
+ .ioctl = ump_file_ioctl,
+#endif
+ .mmap = ump_file_mmap
+};
+
+
+/* This function is called by Linux to initialize this module.
+ * All we do is initialize the UMP device driver.
+ */
+static int ump_initialize_module(void)
+{
+ _mali_osk_errcode_t err;
+
+ DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+ err = ump_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("UMP device driver init failed\n"));
+ return map_errcode(err);
+ }
+
+ MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+ return 0;
+}
+
+
+
+/*
+ * This function is called by Linux to unload/terminate/exit/cleanup this module.
+ * All we do is terminate the UMP device driver.
+ */
+static void ump_cleanup_module(void)
+{
+ DBG_MSG(2, ("Unloading UMP device driver\n"));
+ ump_kernel_destructor();
+ DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+/*
+ * Initialize the UMP device driver.
+ */
+int ump_kernel_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+
+ if (0 == ump_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ ump_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&ump_device, 0, sizeof(ump_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_device.cdev, &ump_fops);
+ ump_device.cdev.owner = THIS_MODULE;
+ ump_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+ if (0 == err)
+ {
+
+#if UMP_LICENSE_IS_GPL
+ ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_device.ump_class))
+ {
+ err = PTR_ERR(ump_device.ump_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&ump_device.cdev);
+#else
+ return 0;
+#endif
+ }
+
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver
+ */
+void ump_kernel_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+ device_destroy(ump_device.ump_class, dev);
+ class_destroy(ump_device.ump_class);
+#endif
+
+ /* unregister char device */
+ cdev_del(&ump_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+ struct ump_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev))
+ {
+ MSG_ERR(("Minor not zero in ump_file_open()\n"));
+ return -ENODEV;
+ }
+
+ /* Call the OS-Independent UMP Open function */
+ err = _ump_ukk_open((void**) &session_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Ump failed to open a new session\n"));
+ return map_errcode( err );
+ }
+
+ filp->private_data = (void*)session_data;
+ filp->f_pos = 0;
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ err = _ump_ukk_close((void**) &filp->private_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err = -ENOTTY;
+ void __user * argument;
+ struct ump_session_data * session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* inode not used */
+#endif
+
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("No session data attached to file object\n"));
+ return -ENOTTY;
+ }
+
+ /* interpret the argument as a user pointer to something */
+ argument = (void __user *)arg;
+
+ switch (cmd)
+ {
+ case UMP_IOC_QUERY_API_VERSION:
+ err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_ALLOCATE :
+ err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_RELEASE:
+ err = ump_release_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SIZE_GET:
+ err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_MSYNC:
+ err = ump_msync_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ default:
+ DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+/*
+ * Handler called by the OS to map the specified virtual memory range to the specified UMP memory.
+ */
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ _ump_uk_map_mem_s args;
+ _mali_osk_errcode_t err;
+ struct ump_session_data * session_data;
+
+ /* Validate the session data */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("mmap() called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = 0;
+ args.size = vma->vm_end - vma->vm_start;
+ args._ukk_private = vma;
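+ /* The UMP secure ID is passed from user space via the mmap() offset argument,
+ * which the kernel stores in vm_pgoff (in units of pages). */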
+ args.secure_id = vma->vm_pgoff;
+ args.is_cached = 0;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ {
+ args.is_cached = 1;
+ vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE ;
+ DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+ }
+
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+
+ /* Call the common mmap handler */
+ err = _ump_ukk_map_mem( &args );
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()\n"));
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+module_init(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h
new file mode 100644
index 00000000000..464c035974b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100644
index 00000000000..5c8a7f3783f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL) /* 256kB, remember to keep the ()s */
+
+
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+
+
+typedef struct block_allocator
+{
+ struct semaphore mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 num_blocks;
+ u32 num_free;
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend * backend);
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
+static void block_allocator_release(void * ctx, ump_dd_mem * handle);
+static inline u32 get_phys(block_allocator * allocator, block_info * block);
+
+
+
+/*
+ * Create dedicated memory backend
+ */
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+{
+ ump_memory_backend * backend;
+ block_allocator * allocator;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+ num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+ if (0 == usable_size)
+ {
+ DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+ return NULL;
+ }
+
+ DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+ DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+ backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL != backend)
+ {
+ allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+ if (NULL != allocator)
+ {
+ allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
+ if (NULL != allocator->all_blocks)
+ {
+ int i;
+
+ allocator->first_free = NULL;
+ allocator->num_blocks = num_blocks;
+ allocator->num_free = num_blocks;
+ allocator->base = base_address;
+ sema_init(&allocator->mutex, 1);
+
+ for (i = 0; i < num_blocks; i++)
+ {
+ allocator->all_blocks[i].next = allocator->first_free;
+ allocator->first_free = &allocator->all_blocks[i];
+ }
+
+ backend->ctx = allocator;
+ backend->allocate = block_allocator_allocate;
+ backend->release = block_allocator_release;
+ backend->shutdown = block_allocator_shutdown;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+ }
+ kfree(allocator);
+ }
+ kfree(backend);
+ }
+
+ return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend
+ */
+static void block_allocator_shutdown(ump_memory_backend * backend)
+{
+ block_allocator * allocator;
+
+ BUG_ON(!backend);
+ BUG_ON(!backend->ctx);
+
+ allocator = (block_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
+
+ kfree(allocator->all_blocks);
+ kfree(allocator);
+ kfree(backend);
+}
+
+
+
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+{
+ block_allocator * allocator;
+ u32 left;
+ block_info * last_allocated = NULL;
+ int i = 0;
+
+ BUG_ON(!ctx);
+ BUG_ON(!mem);
+
+ allocator = (block_allocator*)ctx;
+ left = mem->size_bytes;
+
+ BUG_ON(!left);
+ BUG_ON(!&allocator->mutex);
+
+ mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
+ mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+ if (NULL == mem->block_array)
+ {
+ MSG_ERR(("Failed to allocate block array\n"));
+ return 0;
+ }
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Could not get mutex to do block_allocate\n"));
+ return 0;
+ }
+
+ mem->size_bytes = 0;
+
+ while ((left > 0) && (allocator->first_free))
+ {
+ block_info * block;
+
+ block = allocator->first_free;
+ allocator->first_free = allocator->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+ allocator->num_free--;
+
+ mem->block_array[i].addr = get_phys(allocator, block);
+ mem->block_array[i].size = UMP_BLOCK_SIZE;
+ mem->size_bytes += UMP_BLOCK_SIZE;
+
+ i++;
+
+ if (left < UMP_BLOCK_SIZE) left = 0;
+ else left -= UMP_BLOCK_SIZE;
+ }
+
+ if (left)
+ {
+ block_info * block;
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ block = last_allocated->next;
+ last_allocated->next = allocator->first_free;
+ allocator->first_free = last_allocated;
+ last_allocated = block;
+ allocator->num_free++;
+ }
+
+ vfree(mem->block_array);
+ mem->backend_info = NULL;
+ mem->block_array = NULL;
+
+ DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+ up(&allocator->mutex);
+
+ return 0;
+ }
+
+ mem->backend_info = last_allocated;
+
+ up(&allocator->mutex);
+ mem->is_cached=0;
+
+ return 1;
+}
+
+
+
+static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+{
+ block_allocator * allocator;
+ block_info * block, * next;
+
+ BUG_ON(!ctx);
+ BUG_ON(!handle);
+
+ allocator = (block_allocator*)ctx;
+ block = (block_info*)handle->backend_info;
+ BUG_ON(!block);
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+ return;
+ }
+
+ while (block)
+ {
+ next = block->next;
+
+ BUG_ON( (block < allocator->all_blocks) || (block > (allocator->all_blocks + allocator->num_blocks)));
+
+ block->next = allocator->first_free;
+ allocator->first_free = block;
+ allocator->num_free++;
+
+ block = next;
+ }
+ DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+ up(&allocator->mutex);
+
+ vfree(handle->block_array);
+ handle->block_array = NULL;
+}
+
+
+
+/*
+ * Helper function for calculating the physical base address of a memory block
+ */
+static inline u32 get_phys(block_allocator * allocator, block_info * block)
+{
+ return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100644
index 00000000000..58ebe15e5a2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c
new file mode 100644
index 00000000000..99c9c223036
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+typedef struct os_allocator
+{
+ struct semaphore mutex;
+ u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
+ u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void* ctx, ump_dd_mem * descriptor);
+static int os_allocate(void* ctx, ump_dd_mem * descriptor);
+static void os_memory_backend_destroy(ump_memory_backend * backend);
+
+
+
+/*
+ * Create OS memory backend
+ */
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+{
+ ump_memory_backend * backend;
+ os_allocator * info;
+
+ info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ info->num_pages_max = max_allocation >> PAGE_SHIFT;
+ info->num_pages_allocated = 0;
+
+ sema_init(&info->mutex, 1);
+
+ backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL == backend)
+ {
+ kfree(info);
+ return NULL;
+ }
+
+ backend->ctx = info;
+ backend->allocate = os_allocate;
+ backend->release = os_free;
+ backend->shutdown = os_memory_backend_destroy;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+}
+
+
+
+/*
+ * Destroy specified OS memory backend
+ */
+static void os_memory_backend_destroy(ump_memory_backend * backend)
+{
+ os_allocator * info = (os_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
+
+ kfree(info);
+ kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory
+ */
+static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+{
+ u32 left;
+ os_allocator * info;
+ int pages_allocated = 0;
+ int is_cached;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size_bytes;
+ is_cached = descriptor->is_cached;
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return 0; /* failure */
+ }
+
+ descriptor->backend_info = NULL;
+ descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+ descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+ if (NULL == descriptor->block_array)
+ {
+ up(&info->mutex);
+ DBG_MSG(1, ("Block array could not be allocated\n"));
+ return 0; /* failure */
+ }
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
+ {
+ struct page * new_page;
+
+ if (is_cached)
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
+ } else
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+ }
+ if (NULL == new_page)
+ {
+ break;
+ }
+
+ /* Ensure page caches are flushed. */
+ if ( is_cached )
+ {
+ descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ } else
+ {
+ descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ }
+
+ DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+ if (left < PAGE_SIZE)
+ {
+ left = 0;
+ }
+ else
+ {
+ left -= PAGE_SIZE;
+ }
+
+ pages_allocated++;
+ }
+
+ DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated));
+
+ if (left)
+ {
+ DBG_MSG(1, ("Failed to allocate needed pages\n"));
+
+ while(pages_allocated)
+ {
+ pages_allocated--;
+ if ( !is_cached )
+ {
+ dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+ }
+
+ up(&info->mutex);
+
+ return 0; /* failure */
+ }
+
+ info->num_pages_allocated += pages_allocated;
+
+ DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ up(&info->mutex);
+
+ return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory
+ */
+static void os_free(void* ctx, ump_dd_mem * descriptor)
+{
+ os_allocator * info;
+ int i;
+
+ BUG_ON(!ctx);
+ BUG_ON(!descriptor);
+
+ info = (os_allocator*)ctx;
+
+ BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return;
+ }
+
+ DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+ info->num_pages_allocated -= descriptor->nr_blocks;
+
+ up(&info->mutex);
+
+ for ( i = 0; i < descriptor->nr_blocks; i++)
+ {
+ DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+ if ( ! descriptor->is_cached)
+ {
+ dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+ }
+
+ vfree(descriptor->block_array);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h
new file mode 100644
index 00000000000..d6083477d72
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c
new file mode 100644
index 00000000000..1c1190a537f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/ioport.h> /* request_mem_region */
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
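+
+/* Example (hypothetical values): the backend and its memory region can be
+ * overridden when loading the module, e.g.
+ * insmod ump.ko ump_backend=1 ump_memory_size=0x02000000
+ * insmod ump.ko ump_backend=0 ump_memory_address=0x60000000 ump_memory_size=0x01000000
+ */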
+
+ump_memory_backend* ump_memory_backend_create ( void )
+{
+ ump_memory_backend * backend = NULL;
+
+ /* Create the dynamic memory allocator backend */
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+ DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+ /* Ask the OS if we can use the specified physical memory */
+ if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory"))
+ {
+ MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+ return NULL;
+ }
+ backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+ }
+ else if (1 == ump_backend)
+ {
+ DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+ backend = ump_os_memory_backend_create(ump_memory_size);
+ }
+
+ return backend;
+}
+
+void ump_memory_backend_destroy( void )
+{
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+ release_mem_region(ump_memory_address, ump_memory_size);
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c
new file mode 100644
index 00000000000..b3300ab691a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c
new file mode 100644
index 00000000000..7a1c5e97886
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+typedef struct ump_vma_usage_tracker
+{
+ atomic_t references;
+ ump_memory_allocation *descriptor;
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct * vma);
+static void ump_vma_close(struct vm_area_struct * vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+static struct vm_operations_struct ump_vm_ops =
+{
+ .open = ump_vma_open,
+ .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = ump_cpu_page_fault_handler
+#else
+ .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+ MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void ump_vma_open(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_inc_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+}
+
+static void ump_vma_close(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ _ump_uk_unmap_mem_s args;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_dec_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+
+ if (0 == new_val)
+ {
+ ump_memory_allocation * descriptor;
+
+ descriptor = vma_usage_tracker->descriptor;
+
+ args.ctx = descriptor->ump_session;
+ args.cookie = descriptor->cookie;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ args._ukk_private = NULL; /** @note unused */
+
+ DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+ _ump_ukk_unmap_mem( & args );
+
+ /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
+ }
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ struct vm_area_struct *vma;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+ if (NULL == vma_usage_tracker)
+ {
+ DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
+ return -_MALI_OSK_ERR_FAULT;
+ }
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ vma->vm_private_data = vma_usage_tracker;
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+
+ if (0==descriptor->is_cached)
+ {
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+ DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+
+ /* Setup the functions which handle further VMA handling */
+ vma->vm_ops = &ump_vm_ops;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+
+ atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
+ vma_usage_tracker->descriptor = descriptor;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ ump_vma_usage_tracker * vma_usage_tracker;
+
+ if (NULL == descriptor) return;
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ kfree(vma_usage_tracker);
+ return;
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+{
+ struct vm_area_struct *vma;
+ _mali_osk_errcode_t retval;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+ ump_dd_secure_id_get(descriptor->handle),
+ (unsigned long)vma,
+ (unsigned long)(vma->vm_start + offset),
+ (unsigned long)*phys_addr,
+ size,
+ (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+ return retval;
+}
+
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op )
+{
+ int i;
+ DBG_MSG(3, ("Flushing nr of blocks: %u. First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n", mem->nr_blocks, mem->block_array[0].addr, phys_to_virt(mem->block_array[0].addr), mem->block_array[0].size));
+
+ /* TODO: Use args->size and args->address to select a subrange of this allocation to flush */
+ for (i=0 ; i<mem->nr_blocks; i++)
+ {
+ /* TODO: Find out which flush method is best of 1)Dma OR 2)Normal flush functions */
+ /* TODO: Use args->op to select the flushing method: CLEAN_AND_INVALIDATE or CLEAN */
+ /*#define USING_DMA_FLUSH*/
+ #ifdef USING_DMA_FLUSH
+ DEBUG_ASSERT( (PAGE_SIZE==mem->block_array[i].size));
+ dma_map_page(NULL, pfn_to_page(mem->block_array[i].addr >> PAGE_SHIFT), 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ /*dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);*/
+ #else
+ /* Normal style flush */
+ ump_dd_physical_block *block;
+ u32 start_p, end_p;
+ const void *start_v, *end_v;
+ block = &mem->block_array[i];
+
+ start_p = (u32)block->addr;
+ start_v = phys_to_virt( start_p ) ;
+
+ end_p = start_p + block->size-1;
+ end_v = phys_to_virt( end_p ) ;
+
+ dmac_flush_range(start_v, end_v);
+ outer_flush_range(start_p, end_p);
+ #endif
+ }
+
+ return ;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c
new file mode 100644
index 00000000000..78569c4457b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* is called from ump_kernel_constructor in common code */
+_mali_osk_errcode_t _ump_osk_init( void )
+{
+ if (0 != ump_kernel_device_initialize())
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_osk_term( void )
+{
+ ump_kernel_device_terminate();
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c
new file mode 100644
index 00000000000..155635026aa
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Allocate UMP memory
+ */
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_allocate_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ err = _ump_ukk_allocate( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+ return map_errcode(err);
+ }
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ /* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+ _ump_uk_release_s release_args;
+
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+ release_args.ctx = (void *) session_data;
+ release_args.secure_id = user_interaction.secure_id;
+
+ err = _ump_ukk_release( &release_args );
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+ }
+
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h
new file mode 100644
index 00000000000..b2bef1152c3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c
new file mode 100644
index 00000000000..d14b631246c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Negotiate version of IOCTL API
+ */
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_api_version_s version_info;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&version_info, argument, sizeof(version_info)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ version_info.ctx = (void*) session_data;
+ err = _ump_uku_get_api_version( &version_info );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+ return map_errcode(err);
+ }
+
+ version_info.ctx = NULL;
+
+ /* Copy output data back to user space */
+ if (0 != copy_to_user(argument, &version_info, sizeof(version_info)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ */
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_release_s release_args;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&release_args, argument, sizeof(release_args)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_release()\n"));
+ return -EFAULT;
+ }
+
+ release_args.ctx = (void*) session_data;
+ err = _ump_ukk_release( &release_args );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+ return map_errcode(err);
+ }
+
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ */
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_size_get_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+ err = _ump_ukk_size_get( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+ return map_errcode(err);
+ }
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Perform cache maintenance (msync) on the specified UMP memory.
+ */
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_msync_s user_interaction;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_msync()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ _ump_ukk_msync( &user_interaction );
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h
new file mode 100644
index 00000000000..31afe2dca56
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt
new file mode 100644
index 00000000000..bf1bf61d60e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt
@@ -0,0 +1,17 @@
+Building the UMP Device Driver for Linux
+----------------------------------------
+
+Build the UMP Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ your_config: Name of the sub-folder to find the required config.h file
+ ("arch-" will be prepended)
+
+The config.h file contains the configuration parameters needed, like the
+memory backend to use, and the amount of memory.
+
+The result will be a ump.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
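+
+Example (the kernel path and config name below are illustrative only):
+
+KDIR=/home/user/linux-2.6 CONFIG=ux500 make
+insmod ump.ko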
diff --git a/drivers/gpu/mali/mali400ko/mali.spec b/drivers/gpu/mali/mali400ko/mali.spec
new file mode 100644
index 00000000000..62e8fa0e1b4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/mali.spec
@@ -0,0 +1,57 @@
+%define kernel_target u8500
+%define kernel_version %(find /lib/modules -name "*%{kernel_target}" | cut -c 14-)
+
+Name: mali400ko
+License: GPL
+Summary: Mali400 kernel module
+Version: 1.0
+Release: 1
+URL: http://stericsson.com
+BuildRoot: %{_tmppath}/%{name}
+Requires: kernel
+BuildRequires: kernel-u8500-devel
+Requires(post): ldconfig
+Requires(postun): ldconfig
+
+Source0: mali400ko.tar.gz
+
+%description
+Mali400 kernel module.
+
+%files
+%defattr(-,root,root)
+/lib/modules/%{kernel_version}/extra/mali.ko
+/lib/modules/%{kernel_version}/extra/mali_drm.ko
+
+%prep
+%setup -q
+
+%build
+export CROSS_COMPILE=""
+export KERNELDIR=/usr/src/kernels/%{kernel_version}
+export KERNEL_BUILD_DIR=/usr/src/kernels/%{kernel_version}
+export USING_UMP=1
+export USING_HWMEM=1
+#make V=0 mali-devicedrv
+
+%install
+export CROSS_COMPILE=""
+export KERNELDIR=/usr/src/kernels/%{kernel_version}
+export KERNEL_BUILD_DIR=/usr/src/kernels/%{kernel_version}
+export USING_UMP=1
+export USING_HWMEM=1
+export INSTALL_MOD_DIR=extra
+export INSTALL_MOD_PATH=$RPM_BUILD_ROOT
+make V=0 install-mali install-mali_drm
+
+# Remove kernel modules.* files
+rm -f $RPM_BUILD_ROOT/lib/modules/%{kernel_version}/modules.*
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%post
+
+%postun
+
+
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt b/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt
new file mode 100644
index 00000000000..47009fe7686
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt
@@ -0,0 +1,24 @@
+Notes on integrating the Mali DRM module:
+
+The Mali DRM is a platform device, meaning that you have to add an entry for it in your kernel architecture specification.
+
+Example: (arch/arm/mach-<platform>/mach-<platform>.c)
+
+#ifdef CONFIG_DRM_MALI
+static struct platform_device <platform>_device_mali_drm = {
+ .name = "mali_drm",
+ .id = -1,
+};
+#endif
+
+static struct platform_device *<platform>_devices[] __initdata = {
+...
+#ifdef CONFIG_DRM_MALI
+ &<platform>_device_mali_drm,
+#endif
+...
+};
+
+Where <platform> is substituted with the selected platform.
+
+The "mali" folder should be placed under drivers/gpu/drm/
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile
new file mode 100644
index 00000000000..0f3ace966df
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile
@@ -0,0 +1,17 @@
+#
+# * Copyright (C) 2010 ARM Limited. All rights reserved.
+# *
+# * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# *
+# * A copy of the licence is included with the program, and can also be obtained from Free Software
+# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+#
+# Makefile for the Mali drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+obj-y += mali_drm.o
+mali_drm-objs := mali_drv.o
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/include -I$(KBUILD_EXTMOD)/../drm/include/
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c
new file mode 100644
index 00000000000..583fb55c50a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c
@@ -0,0 +1,155 @@
+/**
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_drv.c
+ * Implementation of the Linux device driver entrypoints for Mali DRM
+ */
+#include <linux/module.h>
+#include <linux/vermagic.h>
+#include <drm/drmP.h>
+#include "mali_drv.h"
+
+static struct platform_device *dev0;
+
+void mali_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+}
+
+void mali_drm_lastclose(struct drm_device *dev)
+{
+}
+
+static int mali_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mali_drm_resume(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int mali_drm_load(struct drm_device *dev, unsigned long chipset)
+{
+ return 0;
+}
+
+static int mali_drm_unload(struct drm_device *dev)
+{
+ return 0;
+}
+
+static struct drm_driver driver =
+{
+ .load = mali_drm_load,
+ .unload = mali_drm_unload,
+ .context_dtor = NULL,
+ .reclaim_buffers = NULL,
+ .reclaim_buffers_idlelocked = NULL,
+ .preclose = mali_drm_preclose,
+ .lastclose = mali_drm_lastclose,
+ .suspend = mali_drm_suspend,
+ .resume = mali_drm_resume,
+ .ioctls = NULL,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct platform_driver platform_drm_driver;
+
+int mali_drm_init(struct platform_device *dev)
+{
+ printk(KERN_INFO "Mali DRM initialize, driver name: %s, version %d.%d\n", DRIVER_NAME, DRIVER_MAJOR, DRIVER_MINOR);
+ if (dev == dev0) {
+ driver.num_ioctls = 0;
+ return drm_platform_init(&driver, dev0);
+ }
+ return 0;
+
+}
+
+void mali_drm_exit(struct platform_device *dev)
+{
+ if (dev0 == dev)
+ drm_platform_exit(&driver, dev);
+}
+
+static int __devinit mali_platform_drm_probe(struct platform_device *dev)
+{
+ return mali_drm_init(dev);
+}
+
+static int mali_platform_drm_remove(struct platform_device *dev)
+{
+ mali_drm_exit(dev);
+
+ return 0;
+}
+
+static int mali_platform_drm_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mali_platform_drm_resume(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver platform_drm_driver = {
+ .probe = mali_platform_drm_probe,
+ .remove = __devexit_p(mali_platform_drm_remove),
+ .suspend = mali_platform_drm_suspend,
+ .resume = mali_platform_drm_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mali_platform_drm_init(void)
+{
+ dev0 = platform_device_register_simple("mali_drm", 0, NULL, 0);
+ return platform_driver_register(&platform_drm_driver);
+}
+
+static void __exit mali_platform_drm_exit(void)
+{
+ platform_driver_unregister(&platform_drm_driver);
+ platform_device_unregister(dev0);
+}
+
+#ifdef MODULE
+module_init(mali_platform_drm_init);
+#else
+late_initcall(mali_platform_drm_init);
+#endif
+module_exit(mali_platform_drm_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_ALIAS(DRIVER_ALIAS);
+MODULE_INFO(vermagic, VERMAGIC_STRING);
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h
new file mode 100644
index 00000000000..188f427ee6d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h
@@ -0,0 +1,25 @@
+/**
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_DRV_H_
+#define _MALI_DRV_H_
+
+#define DRIVER_AUTHOR "ARM Ltd."
+#define DRIVER_NAME "mali_drm"
+#define DRIVER_DESC "DRM module for Mali-200, Mali-400"
+#define DRIVER_LICENSE "GPL v2"
+#define DRIVER_ALIAS "platform:mali_drm"
+#define DRIVER_DATE "20101111"
+#define DRIVER_VERSION "0.2"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 1
+
+#endif /* _MALI_DRV_H_ */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 91be41f6080..1367dbd0f82 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -39,6 +39,44 @@ config HWMON_DEBUG_CHIP
comment "Native drivers"
+config SENSORS_AB8500
+ tristate "AB8500 thermal monitoring"
+ depends on AB8500_GPADC
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the AB8500 chip. The driver includes thermal management for
+	  AB8500 die and two GPADC channels. The GPADC channels are preferably
+ used to access sensors outside the AB8500 chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called abx500-temp.
+
+config SENSORS_AB5500
+ tristate "AB5500 thermal monitoring"
+ depends on AB5500_GPADC
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the AB5500 chip. The driver includes thermal management for
+ AB5500 die, pcb and RF XTAL temperature.
+
+ This driver can also be built as a module. If so, the module
+ will be called abx500-temp.
+
+config SENSORS_DBX500
+ tristate "DBX500 thermal monitoring"
+ depends on MFD_DB8500_PRCMU || MFD_DB5500_PRCMU
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the DBX500 chip. The driver includes thermal management for
+ DBX500 die.
+
+ This driver can also be built as a module. If so, the module
+ will be called dbx500_temp.
+
+
config SENSORS_ABITUGURU
tristate "Abit uGuru (rev 1 & 2)"
depends on X86 && DMI && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 8251ce8cd03..d13b8e0324f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_SENSORS_W83795) += w83795.o
obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
+obj-$(CONFIG_SENSORS_AB8500) += abx500.o ab8500.o
+obj-$(CONFIG_SENSORS_AB5500) += abx500.o ab5500.o
+obj-$(CONFIG_SENSORS_DBX500) += dbx500.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
diff --git a/drivers/hwmon/ab5500.c b/drivers/hwmon/ab5500.c
new file mode 100644
index 00000000000..0122f315ac6
--- /dev/null
+++ b/drivers/hwmon/ab5500.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * If/when the AB5500 thermal warning temperature is reached (threshold
+ * 125C cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event. If a shutdown is not
+ * triggered by user space and the temperature goes beyond the critical
+ * limit (130C), pm_power_off is called.
+ *
+ * If/when AB5500 thermal shutdown temperature is reached a hardware
+ * shutdown of the AB5500 will occur.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include "abx500.h"
+#include <asm/mach-types.h>
+
+/* AB5500 driver monitors GPADC - XTAL_TEMP, PCB_TEMP,
+ * BTEMP_BALL, BAT_CTRL and DIE_TEMP
+ */
+#define NUM_MONITORED_SENSORS 5
+
+#define SHUTDOWN_AUTO_MIN_LIMIT -25
+#define SHUTDOWN_AUTO_MAX_LIMIT 130
+
+static int ab5500_output_convert(int val, u8 sensor)
+{
+ int res = val;
+	/* GPADC returns die temperature in Celsius;
+	 * convert it to millidegree Celsius
+ */
+ if (sensor == DIE_TEMP)
+ res = val * 1000;
+
+ return res;
+}
+
+static int ab5500_read_sensor(struct abx500_temp *data, u8 sensor)
+{
+ int val;
+ /*
+ * Special treatment for BAT_CTRL node, since this
+ * temperature measurement is more complex than just
+ * an ADC readout
+ */
+ if (sensor == BAT_CTRL)
+ val = ab5500_btemp_get_batctrl_temp(data->ab5500_btemp);
+ else
+ val = ab5500_gpadc_convert(data->ab5500_gpadc, sensor);
+
+ if (val < 0)
+ return val;
+ else
+ return ab5500_output_convert(val, sensor);
+}
+
+static ssize_t ab5500_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "ab5500\n");
+}
+
+static ssize_t ab5500_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ char *name;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+
+ /*
+ * Make sure these labels correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR.
+ * Temperature sensors outside ab8500 (read via GPADC) are marked
+ * with prefix ext_
+ */
+ switch (index) {
+ case 1:
+ name = "xtal_temp";
+ break;
+ case 2:
+ name = "pcb_temp";
+ break;
+ case 3:
+ name = "bat_temp";
+ break;
+ case 4:
+ name = "bat_ctrl";
+ break;
+ case 5:
+ name = "ab5500";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%s\n", name);
+}
+
+static int temp_shutdown_trig(int mux)
+{
+ pm_power_off();
+ return 0;
+}
+
+static int ab5500_temp_shutdown_auto(struct abx500_temp *data)
+{
+ int ret;
+ struct adc_auto_input *auto_ip;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(&data->pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = DIE_TEMP;
+ auto_ip->freq = MS500;
+ auto_ip->min = SHUTDOWN_AUTO_MIN_LIMIT;
+ auto_ip->max = SHUTDOWN_AUTO_MAX_LIMIT;
+ auto_ip->auto_adc_callback = temp_shutdown_trig;
+ data->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(data->ab5500_gpadc,
+ data->gpadc_auto);
+ if (ret < 0)
+ kfree(auto_ip);
+
+ return ret;
+}
+
+static int ab5500_is_visible(struct attribute *attr, int n)
+{
+ return attr->mode;
+}
+
+static int ab5500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+ /*
+	 * Make sure the magic numbers below correspond to the node
+ * used for AB5500 thermal warning from HW.
+ */
+ mutex_lock(&data->lock);
+ data->crit_alarm[4] = 1;
+ mutex_unlock(&data->lock);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm");
+ dev_info(&data->pdev->dev, "ABX500 thermal warning,"
+ " power off system now!\n");
+ return 0;
+}
+
+int __init ab5500_hwmon_init(struct abx500_temp *data)
+{
+ int err;
+
+ data->ab5500_gpadc = ab5500_gpadc_get("ab5500-adc.0");
+ if (IS_ERR(data->ab5500_gpadc))
+ return PTR_ERR(data->ab5500_gpadc);
+
+ data->ab5500_btemp = ab5500_btemp_get();
+ if (IS_ERR(data->ab5500_btemp))
+ return PTR_ERR(data->ab5500_btemp);
+
+ err = ab5500_temp_shutdown_auto(data);
+ if (err < 0) {
+ dev_err(&data->pdev->dev, "Failed to register"
+ " auto trigger(%d)\n", err);
+ return err;
+ }
+
+ /*
+ * Setup HW defined data.
+ *
+ * Reference hardware (HREF):
+ *
+ * XTAL_TEMP, PCB_TEMP, BTEMP_BALL refer to millivolts and
+ * BAT_CTRL and DIE_TEMP refer to millidegrees
+ *
+ * Make sure indexes correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR
+ */
+ data->gpadc_addr[0] = XTAL_TEMP;
+ data->gpadc_addr[1] = PCB_TEMP;
+ data->gpadc_addr[2] = BTEMP_BALL;
+ data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[4] = DIE_TEMP;
+ data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+ data->ops.read_sensor = ab5500_read_sensor;
+ data->ops.irq_handler = ab5500_temp_irq_handler;
+ data->ops.show_name = ab5500_show_name;
+ data->ops.show_label = ab5500_show_label;
+ data->ops.is_visible = ab5500_is_visible;
+
+ return 0;
+}
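The header notes in ab5500.c above (and in the ab8500.c file that follows) describe a warn-then-shutdown protocol: the driver raises a sysfs event on temp5_crit_alarm and expects user space to shut the system down before the driver itself resorts to pm_power_off(). A minimal user-space watcher for that event might look like the sketch below; it is only an illustration, and the sysfs path is an assumption since the hwmon index and the exact attribute location are assigned at runtime.

/* Hypothetical user-space watcher for the abx500-temp critical alarm.
 * The sysfs path below is an assumption; it depends on the hwmon index
 * assigned at runtime and on where the attribute group is created.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	struct pollfd pfd;

	pfd.fd = open("/sys/class/hwmon/hwmon0/device/temp5_crit_alarm",
		      O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	/* A first read is needed before sysfs poll notification works. */
	read(pfd.fd, buf, sizeof(buf));
	pfd.events = POLLPRI | POLLERR;

	for (;;) {
		if (poll(&pfd, 1, -1) <= 0)
			continue;
		lseek(pfd.fd, 0, SEEK_SET);
		if (read(pfd.fd, buf, sizeof(buf)) > 0 && buf[0] == '1') {
			printf("thermal critical alarm, shut down now\n");
			break;
		}
	}
	close(pfd.fd);
	return 0;
}

On AB8500 the driver additionally arms a delayed pm_power_off() via power_off_work, so a watcher like this has at most temp_power_off_delay seconds to react.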
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
new file mode 100644
index 00000000000..c6694206b4d
--- /dev/null
+++ b/drivers/hwmon/ab8500.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * If/when the AB8500 thermal warning temperature is reached (threshold
+ * cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event. If a shutdown is not
+ * triggered by user space within a certain time frame,
+ * pm_power_off is called.
+ *
+ * If/when AB8500 thermal shutdown temperature is reached a hardware
+ * shutdown of the AB8500 will occur.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500/gpadc.h>
+#include <linux/mfd/ab8500/bm.h>
+#include "abx500.h"
+#include <asm/mach-types.h>
+
+#define DEFAULT_POWER_OFF_DELAY 10000
+
+/*
+ * The driver monitors GPADC - ADC_AUX1, ADC_AUX2, BTEMP_BALL
+ * and BAT_CTRL.
+ */
+#define NUM_MONITORED_SENSORS 4
+
+static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor)
+{
+ int val;
+ /*
+ * Special treatment for the BAT_CTRL node, since this
+ * temperature measurement is more complex than just
+ * an ADC readout
+ */
+ if (sensor == BAT_CTRL)
+ val = ab8500_btemp_get_batctrl_temp(data->ab8500_btemp);
+ else
+ val = ab8500_gpadc_convert(data->ab8500_gpadc, sensor);
+
+ return val;
+}
+
+static void ab8500_thermal_power_off(struct work_struct *work)
+{
+ struct abx500_temp *data = container_of(work, struct abx500_temp,
+ power_off_work.work);
+
+ dev_warn(&data->pdev->dev, "Power off due to AB8500 thermal warning\n");
+ pm_power_off();
+}
+
+static ssize_t ab8500_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "ab8500\n");
+}
+
+static ssize_t ab8500_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ char *name;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+
+ /*
+ * Make sure these labels correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR.
+ * Temperature sensors outside ab8500 (read via GPADC) are marked
+ * with prefix ext_
+ */
+ switch (index) {
+ case 1:
+ name = "ext_rtc_xtal";
+ break;
+ case 2:
+ name = "ext_db8500";
+ break;
+ case 3:
+ name = "bat_temp";
+ break;
+ case 4:
+ name = "bat_ctrl";
+ break;
+ case 5:
+ name = "ab8500";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%s\n", name);
+}
+
+static int ab8500_is_visible(struct attribute *attr, int n)
+{
+ if (!strcmp(attr->name, "temp5_input") ||
+ !strcmp(attr->name, "temp5_min") ||
+ !strcmp(attr->name, "temp5_max") ||
+ !strcmp(attr->name, "temp5_max_hyst") ||
+ !strcmp(attr->name, "temp5_min_alarm") ||
+ !strcmp(attr->name, "temp5_max_alarm") ||
+ !strcmp(attr->name, "temp5_max_hyst_alarm"))
+ return 0;
+
+ return attr->mode;
+}
+
+static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+ unsigned long delay_in_jiffies;
+ /*
+	 * Make sure the magic numbers below correspond to the node
+ * used for AB8500 thermal warning from HW.
+ */
+ mutex_lock(&data->lock);
+ data->crit_alarm[4] = 1;
+ mutex_unlock(&data->lock);
+
+ hwmon_notify(data->crit_alarm[4], NULL);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm");
+	dev_info(&data->pdev->dev, "AB8500 thermal warning,"
+		" power off in %lu ms\n", data->power_off_delay);
+ delay_in_jiffies = msecs_to_jiffies(data->power_off_delay);
+ schedule_delayed_work(&data->power_off_work, delay_in_jiffies);
+ return 0;
+}
+
+int __init ab8500_hwmon_init(struct abx500_temp *data)
+{
+ data->ab8500_gpadc = ab8500_gpadc_get();
+ if (IS_ERR(data->ab8500_gpadc))
+ return PTR_ERR(data->ab8500_gpadc);
+
+ data->ab8500_btemp = ab8500_btemp_get();
+ if (IS_ERR(data->ab8500_btemp))
+ return PTR_ERR(data->ab8500_btemp);
+
+ INIT_DELAYED_WORK(&data->power_off_work, ab8500_thermal_power_off);
+
+ /*
+ * Setup HW defined data.
+ *
+ * Reference hardware (HREF):
+ *
+ * GPADC - ADC_AUX1, connected to NTC R2148 next to RTC_XTAL on HREF
+ * GPADC - ADC_AUX2, connected to NTC R2150 near DB8500 on HREF
+ * Hence, temp#_min/max/max_hyst refer to millivolts and not
+ * millidegrees
+ * This is not the case for BAT_CTRL where millidegrees is used
+ *
+ * HREF HW does not support reading AB8500 temperature. BUT an
+ * AB8500 IRQ will be launched if die crit temp limit is reached.
+ *
+ * Make sure indexes correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR
+ */
+ data->gpadc_addr[0] = ADC_AUX1;
+ data->gpadc_addr[1] = ADC_AUX2;
+ data->gpadc_addr[2] = BTEMP_BALL;
+ data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[4] = DIE_TEMP;
+ data->power_off_delay = DEFAULT_POWER_OFF_DELAY;
+ data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+ data->ops.read_sensor = ab8500_read_sensor;
+ data->ops.irq_handler = ab8500_temp_irq_handler;
+ data->ops.show_name = ab8500_show_name;
+ data->ops.show_label = ab8500_show_label;
+ data->ops.is_visible = ab8500_is_visible;
+
+ return 0;
+}
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
new file mode 100644
index 00000000000..de4e6280b4b
--- /dev/null
+++ b/drivers/hwmon/abx500.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * ABX500 does not provide auto ADC, so to monitor the required
+ * temperatures, a periodic work is used. It is more important
+ * to not wake up the CPU than to perform this job, hence the use
+ * of a deferred delay.
+ *
+ * A deferred delay for thermal monitor is considered safe because:
+ * If the chip gets too hot during a sleep state it's most likely
+ * due to external factors, such as the surrounding temperature.
+ * I.e. no SW decisions will make any difference.
+ *
+ * If/when the ABX500 thermal warning temperature is reached (threshold
+ * cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event.
+ *
+ * If/when ABX500 thermal shutdown temperature is reached a hardware
+ * shutdown of the ABX500 will occur.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <asm/mach-types.h>
+
+#include "abx500.h"
+
+#define DEFAULT_MONITOR_DELAY 1000
+
+/*
+ * Thresholds are considered inactive if set to 0.
+ * To avoid confusion for user space applications,
+ * the temp monitor delay is set to 0 if all thresholds
+ * are 0.
+ */
+static bool find_active_thresholds(struct abx500_temp *data)
+{
+ int i;
+ for (i = 0; i < data->monitored_sensors; i++)
+ if (data->max[i] != 0 || data->max_hyst[i] != 0
+ || data->min[i] != 0)
+ return true;
+
+	dev_dbg(&data->pdev->dev, "No active thresholds, "
+		"cancel deferred job (if it exists) "
+		"and reset temp monitor delay\n");
+ cancel_delayed_work_sync(&data->work);
+ return false;
+}
+
+static inline void schedule_monitor(struct abx500_temp *data)
+{
+ unsigned long delay_in_jiffies;
+ delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay);
+ schedule_delayed_work(&data->work, delay_in_jiffies);
+}
+
+static inline void gpadc_monitor_exit(struct abx500_temp *data)
+{
+ cancel_delayed_work_sync(&data->work);
+}
+
+static void gpadc_monitor(struct work_struct *work)
+{
+ unsigned long delay_in_jiffies;
+ int val, i, ret;
+ /* Container for alarm node name */
+ char alarm_node[30];
+
+ bool updated_min_alarm = false;
+ bool updated_max_alarm = false;
+ bool updated_max_hyst_alarm = false;
+ struct abx500_temp *data = container_of(work, struct abx500_temp,
+ work.work);
+
+ for (i = 0; i < data->monitored_sensors; i++) {
+ /* Thresholds are considered inactive if set to 0 */
+ if (data->max[i] == 0 && data->max_hyst[i] == 0
+ && data->min[i] == 0)
+ continue;
+
+ val = data->ops.read_sensor(data, data->gpadc_addr[i]);
+ if (val < 0) {
+ dev_err(&data->pdev->dev, "GPADC read failed\n");
+ continue;
+ }
+
+ mutex_lock(&data->lock);
+ if (data->min[i] != 0) {
+ if (val < data->min[i]) {
+ if (data->min_alarm[i] == 0) {
+ data->min_alarm[i] = 1;
+ updated_min_alarm = true;
+ }
+ } else {
+ if (data->min_alarm[i] == 1) {
+ data->min_alarm[i] = 0;
+ updated_min_alarm = true;
+ }
+ }
+
+ }
+ if (data->max[i] != 0) {
+ if (val > data->max[i]) {
+ if (data->max_alarm[i] == 0) {
+ data->max_alarm[i] = 1;
+ updated_max_alarm = true;
+ }
+ } else {
+ if (data->max_alarm[i] == 1) {
+ data->max_alarm[i] = 0;
+ updated_max_alarm = true;
+ }
+ }
+
+ }
+ if (data->max_hyst[i] != 0) {
+ if (val > data->max_hyst[i]) {
+ if (data->max_hyst_alarm[i] == 0) {
+ data->max_hyst_alarm[i] = 1;
+ updated_max_hyst_alarm = true;
+ }
+ } else {
+ if (data->max_hyst_alarm[i] == 1) {
+ data->max_hyst_alarm[i] = 0;
+ updated_max_hyst_alarm = true;
+ }
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ /* hwmon attr index starts at 1, thus "i+1" below */
+ if (updated_min_alarm) {
+ ret = snprintf(alarm_node, 16, "temp%d_min_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ if (updated_max_alarm) {
+ ret = snprintf(alarm_node, 16, "temp%d_max_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ hwmon_notify(data->max_alarm[i], NULL);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ if (updated_max_hyst_alarm) {
+ ret = snprintf(alarm_node, 21, "temp%d_max_hyst_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ }
+ delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay);
+ schedule_delayed_work(&data->work, delay_in_jiffies);
+}
+
+static ssize_t set_temp_monitor_delay(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int res;
+ unsigned long delay_in_s;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+
+ res = strict_strtoul(buf, 10, &delay_in_s);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ data->gpadc_monitor_delay = delay_in_s * 1000;
+
+ if (find_active_thresholds(data))
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_temp_power_off_delay(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int res;
+ unsigned long delay_in_s;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+
+ res = strict_strtoul(buf, 10, &delay_in_s);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ data->power_off_delay = delay_in_s * 1000;
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t show_temp_monitor_delay(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ /* return time in s, not ms */
+ return sprintf(buf, "%lu\n", (data->gpadc_monitor_delay) / 1000);
+}
+
+static ssize_t show_temp_power_off_delay(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ /* return time in s, not ms */
+ return sprintf(buf, "%lu\n", (data->power_off_delay) / 1000);
+}
+
+/* HWMON sysfs interface */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ /*
+ * To avoid confusion between sensor label and chip name, the function
+ * "show_label" is not used to return the chip name.
+ */
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.show_name(dev, devattr, buf);
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.show_label(dev, devattr, buf);
+}
+
+static ssize_t show_input(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ u8 gpadc_addr = data->gpadc_addr[attr->index - 1];
+
+ val = data->ops.read_sensor(data, gpadc_addr);
+ if (val < 0)
+ dev_err(&data->pdev->dev, "GPADC read failed\n");
+
+ return sprintf(buf, "%d\n", val);
+}
+
+/* set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->min_alarm[attr->index - 1] = 0;
+
+ data->min[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->max_alarm[attr->index - 1] = 0;
+
+ data->max[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_max_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->max_hyst_alarm[attr->index - 1] = 0;
+
+ data->max_hyst[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+/*
+ * show functions (RO nodes)
+ */
+static ssize_t show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->min[attr->index - 1]);
+}
+
+static ssize_t show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max[attr->index - 1]);
+}
+
+static ssize_t show_max_hyst(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_hyst[attr->index - 1]);
+}
+
+/* Alarms */
+static ssize_t show_min_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->min_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_hyst_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_hyst_alarm[attr->index - 1]);
+}
+
+static ssize_t show_crit_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->crit_alarm[attr->index - 1]);
+}
+
+static mode_t abx500_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.is_visible(a, n);
+}
+
+static SENSOR_DEVICE_ATTR(temp_monitor_delay, S_IRUGO | S_IWUSR,
+ show_temp_monitor_delay, set_temp_monitor_delay, 0);
+static SENSOR_DEVICE_ATTR(temp_power_off_delay, S_IRUGO | S_IWUSR,
+ show_temp_power_off_delay,
+ set_temp_power_off_delay, 0);
+
+/* Chip name, required by hwmon*/
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+/* GPADC - SENSOR1 */
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 1);
+
+/* GPADC - SENSOR2 */
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min, 2);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 2);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_min_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_max_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 2);
+
+/* GPADC - SENSOR3 */
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min, 3);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 3);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_min_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_max_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 3);
+
+/* GPADC - SENSOR4 */
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_min, set_min, 4);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_max, set_max, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 4);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 4);
+
+/* GPADC - SENSOR5 */
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, show_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_input, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_min, S_IWUSR | S_IRUGO, show_min, set_min, 5);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IWUSR | S_IRUGO, show_max, set_max, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_hyst, S_IWUSR | S_IRUGO,
+			  show_max_hyst, set_max_hyst, 5);
+static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_min_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_max_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_hyst_alarm, S_IRUGO,
+			  show_max_hyst_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO,
+ show_crit_alarm, NULL, 5);
+
+struct attribute *abx500_temp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp_monitor_delay.dev_attr.attr,
+ &sensor_dev_attr_temp_power_off_delay.dev_attr.attr,
+ /* GPADC SENSOR1 */
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR2 */
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR3 */
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR4 */
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR5*/
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_min.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_hyst_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group abx500_temp_group = {
+ .attrs = abx500_temp_attributes,
+ .is_visible = abx500_attrs_visible,
+};
+
+static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct abx500_temp *data = platform_get_drvdata(pdev);
+ data->ops.irq_handler(irq, data);
+ return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct platform_device *pdev)
+{
+ int ret;
+ int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM");
+
+ if (irq < 0)
+ dev_err(&pdev->dev, "Get irq by name failed\n");
+
+ ret = request_threaded_irq(irq, NULL, abx500_temp_irq_handler,
+ IRQF_NO_SUSPEND, "abx500-temp", pdev);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int __devinit abx500_temp_probe(struct platform_device *pdev)
+{
+ struct abx500_temp *data;
+ int err;
+
+ data = kzalloc(sizeof(struct abx500_temp), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ mutex_init(&data->lock);
+
+ /* Chip specific initialization */
+ if (!machine_is_u5500())
+ err = ab8500_hwmon_init(data);
+ else
+ err = ab5500_hwmon_init(data);
+ if (err < 0) {
+		dev_err(&pdev->dev, "abx500 init failed\n");
+ goto exit;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+ goto exit;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&data->work, gpadc_monitor);
+ data->gpadc_monitor_delay = DEFAULT_MONITOR_DELAY;
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+ goto exit_platform_data;
+ }
+
+ err = setup_irqs(pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "irq setup failed (%d)\n", err);
+ goto exit_sysfs_group;
+ }
+ return 0;
+
+exit_sysfs_group:
+ sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+exit_platform_data:
+ hwmon_device_unregister(data->hwmon_dev);
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(data->gpadc_auto);
+ kfree(data);
+ return err;
+}
+
+static int __devexit abx500_temp_remove(struct platform_device *pdev)
+{
+ struct abx500_temp *data = platform_get_drvdata(pdev);
+
+ gpadc_monitor_exit(data);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data->gpadc_auto);
+ kfree(data);
+ return 0;
+}
+
+/* No action required in suspend/resume, thus the lack of functions */
+static struct platform_driver abx500_temp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "abx500-temp",
+ },
+ .probe = abx500_temp_probe,
+ .remove = __devexit_p(abx500_temp_remove),
+};
+
+static int __init abx500_temp_init(void)
+{
+ return platform_driver_register(&abx500_temp_driver);
+}
+
+static void __exit abx500_temp_exit(void)
+{
+ platform_driver_unregister(&abx500_temp_driver);
+}
+
+MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 temperature driver");
+MODULE_LICENSE("GPL");
+
+module_init(abx500_temp_init)
+module_exit(abx500_temp_exit)
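The comments in abx500.c define the user-visible contract: thresholds written as 0 disable monitoring of a channel, temp_monitor_delay and temp_power_off_delay are written and read in seconds although the driver stores them in milliseconds, and the alarm attributes flip when a raw GPADC reading crosses a threshold. A hypothetical user-space helper that arms the monitor could look like this sketch; the sysfs paths are assumptions, as the hwmon index is assigned at runtime.

/* Hypothetical configuration helper for the abx500-temp sysfs interface.
 * Paths are assumptions; adjust the hwmon index for the running system.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *base = "/sys/class/hwmon/hwmon0/device";
	char path[128];

	/* Sample the sensors every 5 s (stored as 5000 ms by the driver). */
	snprintf(path, sizeof(path), "%s/temp_monitor_delay", base);
	write_attr(path, "5");

	/* Arm a max threshold on sensor 3; writing 0 would disable it. */
	snprintf(path, sizeof(path), "%s/temp3_max", base);
	write_attr(path, "600");
	return 0;
}

Writing any non-zero threshold also schedules the deferrable monitor work described in the file's header note, so no separate "start" attribute is needed.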
diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h
new file mode 100644
index 00000000000..9fe28dac28f
--- /dev/null
+++ b/drivers/hwmon/abx500.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License v2
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ */
+
+#ifndef _ABX500_H
+#define _ABX500_H
+
+#define NUM_SENSORS 5
+
+struct ab8500_gpadc;
+struct ab5500_gpadc;
+struct ab8500_btemp;
+struct ab5500_btemp;
+struct adc_auto_input;
+struct abx500_temp;
+
+/**
+ * struct abx500_temp_ops - abx500 chip specific ops
+ * @read_sensor: reads gpadc output
+ * @irq_handler: irq handler
+ * @show_name: hwmon device name
+ * @show_label: hwmon attribute label
+ * @is_visible: is attribute visible
+ */
+struct abx500_temp_ops {
+ int (*read_sensor)(struct abx500_temp *, u8);
+ int (*irq_handler)(int, struct abx500_temp *);
+ ssize_t (*show_name)(struct device *,
+ struct device_attribute *, char *);
+ ssize_t (*show_label) (struct device *,
+ struct device_attribute *, char *);
+ int (*is_visible)(struct attribute *, int);
+};
+
+/**
+ * struct abx500_temp - representation of temp mon device
+ * @pdev: platform device
+ * @hwmon_dev: hwmon device
+ * @ab8500_gpadc: gpadc interface for ab8500
+ * @ab5500_gpadc: gpadc interface for ab5500
+ * @ab8500_btemp: battery temperature interface for ab8500
+ * @ab5500_btemp: battery temperature interface for ab5500
+ * @gpadc_auto: gpadc auto trigger input
+ * @ops: abx500 chip specific ops
+ * @gpadc_addr: gpadc channel address
+ * @temp: sensor temperature input value
+ * @min: sensor temperature min value
+ * @max: sensor temperature max value
+ * @max_hyst: sensor temperature hysteresis value for max limit
+ * @crit: sensor temperature critical value
+ * @min_alarm: sensor temperature min alarm
+ * @max_alarm: sensor temperature max alarm
+ * @max_hyst_alarm: sensor temperature hysteresis alarm
+ * @crit_alarm: sensor temperature critical value alarm
+ * @work: delayed work scheduled to monitor temperature periodically
+ * @power_off_work: delayed work scheduled to power off the system
+ *			when critical temperature is reached
+ * @lock: mutex
+ * @gpadc_monitor_delay: delay between temperature readings in ms
+ * @power_off_delay: delay before power off in ms
+ * @monitored_sensors: number of monitored sensors
+ */
+struct abx500_temp {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ struct ab8500_gpadc *ab8500_gpadc;
+ struct ab5500_gpadc *ab5500_gpadc;
+ struct ab8500_btemp *ab8500_btemp;
+ struct ab5500_btemp *ab5500_btemp;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_temp_ops ops;
+ u8 gpadc_addr[NUM_SENSORS];
+ unsigned long temp[NUM_SENSORS];
+ unsigned long min[NUM_SENSORS];
+ unsigned long max[NUM_SENSORS];
+ unsigned long max_hyst[NUM_SENSORS];
+ unsigned long crit[NUM_SENSORS];
+ unsigned long min_alarm[NUM_SENSORS];
+ unsigned long max_alarm[NUM_SENSORS];
+ unsigned long max_hyst_alarm[NUM_SENSORS];
+ unsigned long crit_alarm[NUM_SENSORS];
+ struct delayed_work work;
+ struct delayed_work power_off_work;
+ struct mutex lock;
+ /* Delay (ms) between temperature readings */
+ unsigned long gpadc_monitor_delay;
+ /* Delay (ms) before power off */
+ unsigned long power_off_delay;
+ int monitored_sensors;
+};
+
+int ab8500_hwmon_init(struct abx500_temp *data) __init;
+int ab5500_hwmon_init(struct abx500_temp *data) __init;
+
+#endif /* _ABX500_H */
diff --git a/drivers/hwmon/dbx500.c b/drivers/hwmon/dbx500.c
new file mode 100644
index 00000000000..c034b48f8dd
--- /dev/null
+++ b/drivers/hwmon/dbx500.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ *
+ * Author: WenHai Fang <wenhai.h.fang@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+/*
+ * Default measure period to 0xFF x cycle32k
+ */
+#define DEFAULT_MEASURE_TIME 0xFF
+
+/*
+ * Default critical sensor temperature
+ */
+#define DEFAULT_CRITICAL_TEMP 85
+
+/* This driver monitors DB thermal*/
+#define NUM_SENSORS 1
+
+struct dbx500_temp {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ unsigned char min[NUM_SENSORS];
+ unsigned char max[NUM_SENSORS];
+ unsigned char crit[NUM_SENSORS];
+ unsigned char min_alarm[NUM_SENSORS];
+ unsigned char max_alarm[NUM_SENSORS];
+ unsigned short measure_time;
+ bool monitoring_active;
+ struct mutex lock;
+};
+
+static inline void start_temp_monitoring(struct dbx500_temp *data,
+ const int index)
+{
+ unsigned int i;
+
+ /* determine if there are any sensors worth monitoring */
+ for (i = 0; i < NUM_SENSORS; i++)
+ if (data->min[i] || data->max[i])
+ goto start_monitoring;
+
+ return;
+
+start_monitoring:
+ /* kick off the monitor job */
+ data->min_alarm[index] = 0;
+ data->max_alarm[index] = 0;
+
+ (void) prcmu_start_temp_sense(data->measure_time);
+ data->monitoring_active = true;
+}
+
+static inline void stop_temp_monitoring(struct dbx500_temp *data)
+{
+ if (data->monitoring_active) {
+ (void) prcmu_stop_temp_sense();
+ data->monitoring_active = false;
+ }
+}
+
+/* HWMON sysfs interface */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "dbx500\n");
+}
+
+static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return show_name(dev, devattr, buf);
+}
+
+/* set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ if (val > data->max[attr->index - 1])
+ val = data->max[attr->index - 1];
+
+ data->min[attr->index - 1] = val;
+
+ stop_temp_monitoring(data);
+
+ (void) prcmu_config_hotmon(data->min[attr->index - 1],
+ data->max[attr->index - 1]);
+
+ start_temp_monitoring(data, (attr->index - 1));
+
+ mutex_unlock(&data->lock);
+ return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ if (val < data->min[attr->index - 1])
+ val = data->min[attr->index - 1];
+
+ data->max[attr->index - 1] = val;
+
+ stop_temp_monitoring(data);
+
+ (void) prcmu_config_hotmon(data->min[attr->index - 1],
+ data->max[attr->index - 1]);
+
+ start_temp_monitoring(data, (attr->index - 1));
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_crit(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ data->crit[attr->index - 1] = val;
+ (void) prcmu_config_hotdog(data->crit[attr->index - 1]);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+/*
+ * show functions (RO nodes)
+ * Notice that min/max/crit refer to degrees
+ */
+static ssize_t show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->min[attr->index - 1]);
+}
+
+static ssize_t show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->max[attr->index - 1]);
+}
+
+static ssize_t show_crit(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->crit[attr->index - 1]);
+}
+
+/* Alarms */
+static ssize_t show_min_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->min_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->max_alarm[attr->index - 1]);
+}
+
+/* Chip name, required by hwmon*/
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+ show_crit, set_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+
+static struct attribute *dbx500_temp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dbx500_temp_group = {
+ .attrs = dbx500_temp_attributes,
+};
+
+static irqreturn_t prcmu_hotmon_low_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ mutex_lock(&data->lock);
+ data->min_alarm[0] = 1;
+ mutex_unlock(&data->lock);
+
+ sysfs_notify(&pdev->dev.kobj, NULL, "temp1_min_alarm");
+ dev_dbg(&pdev->dev, "DBX500 thermal low warning\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prcmu_hotmon_high_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ mutex_lock(&data->lock);
+ data->max_alarm[0] = 1;
+ mutex_unlock(&data->lock);
+
+ hwmon_notify(data->max_alarm[0], NULL);
+ sysfs_notify(&pdev->dev.kobj, NULL, "temp1_max_alarm");
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit dbx500_temp_probe(struct platform_device *pdev)
+{
+ struct dbx500_temp *data;
+ int err = 0, i;
+ int irq;
+
+ dev_dbg(&pdev->dev, "dbx500_temp: Function dbx500_temp_probe.\n");
+
+ data = kzalloc(sizeof(struct dbx500_temp), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed\n");
+		err = irq;
+		goto exit;
+	}
+
+	err = request_threaded_irq(irq, NULL,
+				prcmu_hotmon_low_irq_handler,
+				IRQF_NO_SUSPEND,
+				"dbx500_temp_low", pdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "dbx500: Failed to allocate HOTMON_LOW IRQ\n");
+		goto exit;
+	} else {
+		dev_dbg(&pdev->dev, "dbx500: Allocated HOTMON_LOW IRQ\n");
+	}
+
+	irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed\n");
+		err = irq;
+		goto exit;
+	}
+
+	err = request_threaded_irq(irq, NULL,
+				prcmu_hotmon_high_irq_handler,
+				IRQF_NO_SUSPEND,
+				"dbx500_temp_high", pdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "dbx500: Failed to allocate HOTMON_HIGH IRQ\n");
+		goto exit;
+	} else {
+		dev_dbg(&pdev->dev, "dbx500: Allocated HOTMON_HIGH IRQ\n");
+	}
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+ goto exit;
+ }
+
+ for (i = 0; i < NUM_SENSORS; i++) {
+ data->min[i] = 0;
+ data->max[i] = 0;
+ data->crit[i] = DEFAULT_CRITICAL_TEMP;
+ data->min_alarm[i] = 0;
+ data->max_alarm[i] = 0;
+ }
+
+ mutex_init(&data->lock);
+
+ data->pdev = pdev;
+ data->measure_time = DEFAULT_MEASURE_TIME;
+ data->monitoring_active = false;
+
+ /* set PRCMU to disable platform when we get to the critical temp */
+ (void) prcmu_config_hotdog(DEFAULT_CRITICAL_TEMP);
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &dbx500_temp_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+ goto exit_platform_data;
+ }
+
+ return 0;
+
+exit_platform_data:
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(data);
+ return err;
+}
+
+static int __devexit dbx500_temp_remove(struct platform_device *pdev)
+{
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &dbx500_temp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+ return 0;
+}
+
+/* No action required in suspend/resume, thus the lack of functions */
+static struct platform_driver dbx500_temp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dbx500_temp",
+ },
+ .probe = dbx500_temp_probe,
+ .remove = __devexit_p(dbx500_temp_remove),
+};
+
+static int __init dbx500_temp_init(void)
+{
+ return platform_driver_register(&dbx500_temp_driver);
+}
+
+static void __exit dbx500_temp_exit(void)
+{
+ platform_driver_unregister(&dbx500_temp_driver);
+}
+
+MODULE_AUTHOR("WenHai Fang <wenhai.h.fang@stericsson.com>");
+MODULE_DESCRIPTION("DBX500 temperature driver");
+MODULE_LICENSE("GPL");
+
+module_init(dbx500_temp_init)
+module_exit(dbx500_temp_exit)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 6460487e41b..ac718a57b88 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -21,6 +21,7 @@
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/notifier.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -29,6 +30,8 @@ static struct class *hwmon_class;
static DEFINE_IDA(hwmon_ida);
+static BLOCKING_NOTIFIER_HEAD(hwmon_notifier_list);
+
/**
* hwmon_device_register - register w/ hwmon
* @dev: the device to register
@@ -73,6 +76,24 @@ void hwmon_device_unregister(struct device *dev)
"hwmon_device_unregister() failed: bad class ID!\n");
}
+int hwmon_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&hwmon_notifier_list, nb);
+}
+EXPORT_SYMBOL(hwmon_notifier_register);
+
+int hwmon_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&hwmon_notifier_list, nb);
+}
+EXPORT_SYMBOL(hwmon_notifier_unregister);
+
+void hwmon_notify(unsigned long val, void *v)
+{
+ blocking_notifier_call_chain(&hwmon_notifier_list, val, v);
+}
+EXPORT_SYMBOL(hwmon_notify);
+
static void __init hwmon_pci_quirks(void)
{
#if defined CONFIG_X86 && defined CONFIG_PCI
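hwmon_notify() is called from the abx500 and dbx500 drivers in this patch when an alarm trips, but no in-kernel consumer of the new chain appears here. Assuming the register/unregister prototypes are exposed through linux/hwmon.h elsewhere in the series, a hypothetical consumer would look roughly like this sketch (the module and callback names are made up for illustration):

/* Hypothetical consumer of the hwmon notifier chain added above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/hwmon.h>

static int thermal_policy_event(struct notifier_block *nb,
				unsigned long state, void *data)
{
	/* 'state' is the alarm value passed to hwmon_notify() */
	pr_info("hwmon alarm notification, state %lu\n", state);
	return NOTIFY_OK;
}

static struct notifier_block thermal_policy_nb = {
	.notifier_call = thermal_policy_event,
};

static int __init thermal_policy_init(void)
{
	return hwmon_notifier_register(&thermal_policy_nb);
}

static void __exit thermal_policy_exit(void)
{
	hwmon_notifier_unregister(&thermal_policy_nb);
}

module_init(thermal_policy_init);
module_exit(thermal_policy_exit);
MODULE_LICENSE("GPL");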
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5267ab93d55..e6f2abba469 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -431,7 +431,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n",
+		dev_err(&dev->pdev->dev, "Read from slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -518,7 +518,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n",
+ dev_err(&dev->pdev->dev, "Write to slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -634,6 +634,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
clk_enable(dev->clk);
+ dev->busy = true;
+
status = init_hw(dev);
if (status)
goto out;
@@ -1047,6 +1049,7 @@ static struct platform_driver nmk_i2c_driver = {
},
.probe = nmk_i2c_probe,
.remove = __devexit_p(nmk_i2c_remove),
+ .suspend = nmk_i2c_suspend,
};
static int __init nmk_i2c_init(void)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 615c21f2a55..27daff7cebe 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -151,6 +151,16 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
+config KEYBOARD_DB5500
+ tristate "DB5500 keyboard"
+ depends on UX500_SOC_DB5500
+ help
+ Say Y here to enable the on-chip keypad controller on the
+ ST-Ericsson U5500 platform.
+
+ To compile this driver as a module, choose M here: the
+ module will be called db5500_keypad.
+
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -365,7 +375,7 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
-config KEYBOARD_NOMADIK
+config KEYBOARD_NOMADIK_SKE
tristate "ST-Ericsson Nomadik SKE keyboard"
depends on PLAT_NOMADIK
help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index ddde0fd476f..e675356acb2 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
+obj-$(CONFIG_KEYBOARD_DB5500) += db5500_keypad.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
@@ -30,7 +31,7 @@ obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
+obj-$(CONFIG_KEYBOARD_NOMADIK_SKE) += nomadik-ske-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
diff --git a/drivers/input/keyboard/db5500_keypad.c b/drivers/input/keyboard/db5500_keypad.c
new file mode 100644
index 00000000000..45d2f652d60
--- /dev/null
+++ b/drivers/input/keyboard/db5500_keypad.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License, version 2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <mach/db5500-keypad.h>
+#include <linux/regulator/consumer.h>
+
+#define KEYPAD_CTR 0x0
+#define KEYPAD_IRQ_CLEAR 0x4
+#define KEYPAD_INT_ENABLE 0x8
+#define KEYPAD_INT_STATUS 0xC
+#define KEYPAD_ARRAY_01 0x18
+
+#define KEYPAD_NUM_ARRAY_REGS 5
+
+#define KEYPAD_CTR_WRITE_IRQ_ENABLE (1 << 10)
+#define KEYPAD_CTR_WRITE_CONTROL (1 << 8)
+#define KEYPAD_CTR_SCAN_ENABLE (1 << 7)
+
+#define KEYPAD_ARRAY_CHANGEBIT (1 << 15)
+
+#define KEYPAD_DEBOUNCE_PERIOD_MIN 5 /* ms */
+#define KEYPAD_DEBOUNCE_PERIOD_MAX 80 /* ms */
+
+#define KEYPAD_GND_ROW 8
+
+#define KEYPAD_ROW_SHIFT 3
+#define KEYPAD_KEYMAP_SIZE \
+ (KEYPAD_MAX_ROWS * KEYPAD_MAX_COLS)
+
+#define KEY_PRESSED_DELAY 10
+/**
+ * struct db5500_keypad - data structure used by keypad driver
+ * @irq: irq number
+ * @base: keypad registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform data
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ * @regulator: regulator used by keypad
+ * @switch_work: delayed work variable for switching to gpio
+ * @gpio_work: delayed work variable for reporting key event in gpio mode
+ * @previous_set: previous set of registers
+ * @enable: flag to enable the driver event
+ * @valid_key: hold the state of valid key press
+ * @db5500_rows: rows gpio array for db5500 keypad
+ * @db5500_cols: cols gpio array for db5500 keypad
+ * @gpio_input_irq: array for gpio irqs
+ * @gpio_row: gpio row
+ * @gpio_col: gpio column
+ */
+struct db5500_keypad {
+ int irq;
+ void __iomem *base;
+ struct input_dev *input;
+ const struct db5500_keypad_platform_data *board;
+ unsigned short keymap[KEYPAD_KEYMAP_SIZE];
+ struct clk *clk;
+ struct regulator *regulator;
+ struct delayed_work switch_work;
+ struct delayed_work gpio_work;
+ u8 previous_set[KEYPAD_MAX_ROWS];
+ bool enable;
+ bool valid_key;
+ int db5500_rows[KEYPAD_MAX_ROWS];
+ int db5500_cols[KEYPAD_MAX_COLS];
+ int gpio_input_irq[KEYPAD_MAX_ROWS];
+ int gpio_row;
+ int gpio_col;
+};
+
+/**
+ * db5500_keypad_report() - reports the keypad event
+ * @keypad: pointer to device structure
+ * @row: row value of keypad
+ * @curr: current event
+ * @previous: previous event
+ *
+ * This function reports keypad events to the input subsystem
+ * and returns nothing.
+ *
+ * By default all column reads are 1111 1111b. Any press will pull the column
+ * down, leading to a 0 in any of these locations. We invert these values so
+ * that a 1 means "column pressed".
+ * If curr changes from 0 to 1 relative to previous, we report a key press.
+ * If curr changes from 1 to 0 relative to previous, we report a key release.
+ */
+static void db5500_keypad_report(struct db5500_keypad *keypad, int row,
+ u8 curr, u8 previous)
+{
+ struct input_dev *input = keypad->input;
+ u8 changed = curr ^ previous;
+
+ while (changed) {
+ int col = __ffs(changed);
+ bool press = curr & BIT(col);
+ int code = MATRIX_SCAN_CODE(row, col, KEYPAD_ROW_SHIFT);
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], press);
+ input_sync(input);
+
+ changed &= ~BIT(col);
+ }
+}
+
+static void db5500_keypad_scan(struct db5500_keypad *keypad)
+{
+ u8 current_set[ARRAY_SIZE(keypad->previous_set)];
+ int tries = 100;
+ bool changebit;
+ u32 data_reg;
+ u8 allrows;
+ u8 common;
+ int i;
+
+ writel(0x1, keypad->base + KEYPAD_IRQ_CLEAR);
+
+again:
+ if (!tries--) {
+ dev_warn(&keypad->input->dev, "values failed to stabilize\n");
+ return;
+ }
+
+ changebit = readl(keypad->base + KEYPAD_ARRAY_01)
+ & KEYPAD_ARRAY_CHANGEBIT;
+
+ for (i = 0; i < KEYPAD_NUM_ARRAY_REGS; i++) {
+ data_reg = readl(keypad->base + KEYPAD_ARRAY_01 + 4 * i);
+
+ /* If the change bit changed, we need to reread the data */
+ if (changebit != !!(data_reg & KEYPAD_ARRAY_CHANGEBIT))
+ goto again;
+
+ current_set[2 * i] = ~(data_reg & 0xff);
+
+ /* Last array reg has only one valid set of columns */
+ if (i != KEYPAD_NUM_ARRAY_REGS - 1)
+ current_set[2 * i + 1] = ~((data_reg & 0xff0000) >> 16);
+ }
+
+ allrows = current_set[KEYPAD_GND_ROW];
+
+ /*
+ * Sometimes during a GND row release, an incorrect report is received
+ * where the ARRAY8 all rows setting does not match the other ARRAY*
+ * rows. Ignore this report; the correct one has been observed to
+ * follow it.
+ */
+ common = 0xff;
+ for (i = 0; i < KEYPAD_GND_ROW; i++)
+ common &= current_set[i];
+
+ if ((allrows & common) != common)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(current_set); i++) {
+ /*
+ * If there is an allrows press (GND row), we need to ignore
+		 * the allrows values from the rest of the ARRAYs.
+ */
+ if (i < KEYPAD_GND_ROW && allrows)
+ current_set[i] &= ~allrows;
+
+ if (keypad->previous_set[i] == current_set[i])
+ continue;
+
+ db5500_keypad_report(keypad, i, current_set[i],
+ keypad->previous_set[i]);
+ }
+
+ /* update the reference set of array registers */
+ memcpy(keypad->previous_set, current_set, sizeof(keypad->previous_set));
+
+ return;
+}
+
+/**
+ * db5500_keypad_writel() - write into keypad registers
+ * @keypad: pointer to device structure
+ * @val: value to write into register
+ * @reg: register offset
+ *
+ * Writes @val to the keypad register at offset @reg, waiting until
+ * the hardware allows the write.
+ */
+static void db5500_keypad_writel(struct db5500_keypad *keypad, u32 val, u32 reg)
+{
+ int timeout = 4;
+ int allowedbit;
+
+ switch (reg) {
+ case KEYPAD_CTR:
+ allowedbit = KEYPAD_CTR_WRITE_CONTROL;
+ break;
+ case KEYPAD_INT_ENABLE:
+ allowedbit = KEYPAD_CTR_WRITE_IRQ_ENABLE;
+ break;
+ default:
+ BUG();
+ }
+
+ do {
+ u32 ctr = readl(keypad->base + KEYPAD_CTR);
+
+ if (ctr & allowedbit)
+ break;
+
+ udelay(50);
+ } while (--timeout);
+
+ /* Five 32k clk cycles (~150us) required, we waited 200us */
+ WARN_ON(!timeout);
+
+ writel(val, keypad->base + reg);
+}
+
+/**
+ * db5500_keypad_chip_init() - initialize the keypad chip
+ * @keypad: pointer to device structure
+ *
+ * Configures debounce and enables scanning and interrupts on the
+ * keypad controller. Returns 0 on success.
+ */
+static int db5500_keypad_chip_init(struct db5500_keypad *keypad)
+{
+ int debounce = keypad->board->debounce_ms;
+ int debounce_hits = 0;
+
+ if (debounce < KEYPAD_DEBOUNCE_PERIOD_MIN)
+ debounce = KEYPAD_DEBOUNCE_PERIOD_MIN;
+
+ if (debounce > KEYPAD_DEBOUNCE_PERIOD_MAX) {
+ debounce_hits = DIV_ROUND_UP(debounce,
+ KEYPAD_DEBOUNCE_PERIOD_MAX) - 1;
+ debounce = KEYPAD_DEBOUNCE_PERIOD_MAX;
+ }
+
+	/* Convert milliseconds to the debounce register value */
+ debounce = DIV_ROUND_UP(debounce, KEYPAD_DEBOUNCE_PERIOD_MIN) - 1;
+
+ clk_enable(keypad->clk);
+
+ db5500_keypad_writel(keypad,
+ KEYPAD_CTR_SCAN_ENABLE
+ | ((debounce_hits & 0x7) << 4)
+ | debounce,
+ KEYPAD_CTR);
+
+ db5500_keypad_writel(keypad, 0x1, KEYPAD_INT_ENABLE);
+
+ return 0;
+}
+
+static void db5500_mode_enable(struct db5500_keypad *keypad, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ db5500_keypad_writel(keypad, 0, KEYPAD_CTR);
+ db5500_keypad_writel(keypad, 0, KEYPAD_INT_ENABLE);
+ if (keypad->board->exit)
+ keypad->board->exit();
+ for (i = 0; i < keypad->board->krow; i++) {
+ enable_irq(keypad->gpio_input_irq[i]);
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ } else {
+ regulator_enable(keypad->regulator);
+ clk_enable(keypad->clk);
+ for (i = 0; i < keypad->board->krow; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ if (keypad->board->init)
+ keypad->board->init();
+ db5500_keypad_chip_init(keypad);
+ }
+}
+
+static void db5500_gpio_switch_work(struct work_struct *work)
+{
+ struct db5500_keypad *keypad = container_of(work,
+ struct db5500_keypad, switch_work.work);
+
+ db5500_mode_enable(keypad, false);
+ keypad->enable = false;
+}
+
+static void db5500_gpio_release_work(struct work_struct *work)
+{
+ int code;
+ struct db5500_keypad *keypad = container_of(work,
+ struct db5500_keypad, gpio_work.work);
+ struct input_dev *input = keypad->input;
+
+ code = MATRIX_SCAN_CODE(keypad->gpio_col, keypad->gpio_row,
+ KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], 1);
+ input_sync(input);
+ input_report_key(input, keypad->keymap[code], 0);
+ input_sync(input);
+}
+
+static int db5500_read_get_gpio_row(struct db5500_keypad *keypad)
+{
+ int row;
+ int value = 0;
+ int ret;
+
+ /* read all rows GPIO data register values */
+ for (row = 0; row < keypad->board->krow; row++) {
+ ret = gpio_get_value(keypad->db5500_rows[row]);
+ value += (1 << row) * ret;
+ }
+
+ /* get the exact row */
+ for (row = 0; row < keypad->board->krow; row++) {
+ if (((1 << row) & value) == 0)
+ return row;
+ }
+
+ return -1;
+}
+
+static void db5500_set_cols(struct db5500_keypad *keypad, int col)
+{
+ int i, ret;
+ int value;
+
+	/*
+	 * Drive all column GPIOs high except the requested
+	 * column, which is driven low.
+	 */
+ for (i = 0; i < keypad->board->kcol; i++) {
+ if (i == col)
+ value = 0;
+ else
+ value = 1;
+ ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd");
+
+ if (ret < 0) {
+ pr_err("db5500_set_cols: gpio request failed\n");
+ continue;
+ }
+
+ gpio_direction_output(keypad->db5500_cols[i], value);
+ gpio_free(keypad->db5500_cols[i]);
+ }
+}
+
+static void db5500_free_cols(struct db5500_keypad *keypad)
+{
+ int i, ret;
+
+ for (i = 0; i < keypad->board->kcol; i++) {
+ ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd");
+
+ if (ret < 0) {
+ pr_err("db5500_free_cols: gpio request failed\n");
+ continue;
+ }
+
+ gpio_direction_output(keypad->db5500_cols[i], 0);
+ gpio_free(keypad->db5500_cols[i]);
+ }
+}
+
+static void db5500_manual_scan(struct db5500_keypad *keypad)
+{
+ int row;
+ int col;
+
+ keypad->valid_key = false;
+
+ for (col = 0; col < keypad->board->kcol; col++) {
+ db5500_set_cols(keypad, col);
+ row = db5500_read_get_gpio_row(keypad);
+ if (row >= 0) {
+ keypad->valid_key = true;
+ keypad->gpio_row = row;
+ keypad->gpio_col = col;
+ break;
+ }
+ }
+ db5500_free_cols(keypad);
+}
+
+static irqreturn_t db5500_keypad_gpio_irq(int irq, void *dev_id)
+{
+ struct db5500_keypad *keypad = dev_id;
+
+ if (!gpio_get_value(IRQ_TO_GPIO(irq))) {
+ db5500_manual_scan(keypad);
+ if (!keypad->enable) {
+ keypad->enable = true;
+ db5500_mode_enable(keypad, true);
+ }
+
+		/*
+		 * Schedule delayed work to report the key press if it
+		 * is not detected in keypad mode.
+		 */
+ if (keypad->valid_key) {
+ schedule_delayed_work(&keypad->gpio_work,
+ KEY_PRESSED_DELAY);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t db5500_keypad_irq(int irq, void *dev_id)
+{
+ struct db5500_keypad *keypad = dev_id;
+
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ db5500_keypad_scan(keypad);
+
+ /*
+	 * Schedule delayed work to change back to GPIO mode
+	 * if there is no activity in keypad mode.
+ */
+ if (keypad->enable)
+ schedule_delayed_work(&keypad->switch_work,
+ keypad->board->switch_delay);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * db5500_keypad_probe() - initialize the keypad driver
+ * @pdev: pointer to platform device structure
+ *
+ * Allocates and initializes the driver instance data, requests the
+ * interrupts and registers the device with the input subsystem.
+ */
+static int __devinit db5500_keypad_probe(struct platform_device *pdev)
+{
+ struct db5500_keypad_platform_data *plat;
+ struct db5500_keypad *keypad;
+ struct resource *res;
+ struct input_dev *input;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+ int irq;
+ int i;
+
+ plat = pdev->dev.platform_data;
+ if (!plat) {
+ dev_err(&pdev->dev, "invalid keypad platform data\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get keypad irq\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ ret = -EBUSY;
+ goto out_ret;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto out_freerequest_memregions;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to clk_get\n");
+ ret = PTR_ERR(clk);
+ goto out_iounmap;
+ }
+
+ keypad = kzalloc(sizeof(struct db5500_keypad), GFP_KERNEL);
+ if (!keypad) {
+ dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+ ret = -ENOMEM;
+ goto out_freeclk;
+ }
+
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&pdev->dev, "failed to input_allocate_device\n");
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+
+ keypad->regulator = regulator_get(&pdev->dev, "v-ape");
+ if (IS_ERR(keypad->regulator)) {
+ dev_err(&pdev->dev, "regulator_get failed\n");
+ keypad->regulator = NULL;
+ ret = -EINVAL;
+ goto out_regulator_get;
+ } else {
+ ret = regulator_enable(keypad->regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "regulator_enable failed\n");
+ goto out_regulator_enable;
+ }
+ }
+
+ input->id.bustype = BUS_HOST;
+ input->name = "db5500-keypad";
+ input->dev.parent = &pdev->dev;
+
+ input->keycode = keypad->keymap;
+ input->keycodesize = sizeof(keypad->keymap[0]);
+ input->keycodemax = ARRAY_SIZE(keypad->keymap);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, input->evbit);
+ if (!plat->no_autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ matrix_keypad_build_keymap(plat->keymap_data, KEYPAD_ROW_SHIFT,
+ input->keycode, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ keypad->base = base;
+ keypad->clk = clk;
+
+ INIT_DELAYED_WORK(&keypad->switch_work, db5500_gpio_switch_work);
+ INIT_DELAYED_WORK(&keypad->gpio_work, db5500_gpio_release_work);
+
+ clk_enable(keypad->clk);
+	if (!keypad->board->init) {
+		dev_err(&pdev->dev, "init function not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->init() < 0) {
+ dev_err(&pdev->dev, "keyboard init config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (!keypad->board->exit) {
+ dev_err(&pdev->dev, "exit funtion not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->exit() < 0) {
+ dev_err(&pdev->dev, "keyboard exit config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ keypad->db5500_rows[i] = *plat->gpio_input_pins;
+ keypad->gpio_input_irq[i] =
+ GPIO_TO_IRQ(keypad->db5500_rows[i]);
+ plat->gpio_input_pins++;
+ }
+
+ for (i = 0; i < keypad->board->kcol; i++) {
+ keypad->db5500_cols[i] = *plat->gpio_output_pins;
+ plat->gpio_output_pins++;
+ }
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ ret = request_threaded_irq(keypad->gpio_input_irq[i],
+ NULL, db5500_keypad_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "db5500-keypad-gpio", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate gpio irq %d failed\n",
+ keypad->gpio_input_irq[i]);
+ goto out_unregisterinput;
+ }
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+
+ ret = request_threaded_irq(keypad->irq, NULL, db5500_keypad_irq,
+ IRQF_ONESHOT, "db5500-keypad", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
+ goto out_unregisterinput;
+ }
+
+ platform_set_drvdata(pdev, keypad);
+
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ return 0;
+
+out_unregisterinput:
+	input_unregister_device(input);
+	input = NULL;
+	clk_disable(keypad->clk);
+out_freeinput:
+	regulator_disable(keypad->regulator);
+out_regulator_enable:
+	regulator_put(keypad->regulator);
+out_regulator_get:
+	input_free_device(input);
+out_freekeypad:
+ kfree(keypad);
+out_freeclk:
+ clk_put(clk);
+out_iounmap:
+ iounmap(base);
+out_freerequest_memregions:
+ release_mem_region(res->start, resource_size(res));
+out_ret:
+ return ret;
+}
+
+/**
+ * db5500_keypad_remove() - Removes the keypad driver
+ * @pdev: pointer to platform device structure
+ *
+ * Unregisters the input device and releases the resources
+ * acquired in probe.
+ */
+static int __devexit db5500_keypad_remove(struct platform_device *pdev)
+{
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ free_irq(keypad->irq, keypad);
+ input_unregister_device(keypad->input);
+
+ clk_disable(keypad->clk);
+ clk_put(keypad->clk);
+
+ if (keypad->board->exit)
+ keypad->board->exit();
+
+ regulator_put(keypad->regulator);
+
+ iounmap(keypad->base);
+
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(keypad);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * db5500_keypad_suspend() - suspend the keypad controller
+ * @dev: pointer to device structure
+ *
+ * Disables the keypad or configures it as a wakeup source,
+ * depending on the device wakeup setting.
+ */
+static int db5500_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(irq);
+ else {
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ disable_irq(irq);
+ if (keypad->enable) {
+ db5500_mode_enable(keypad, false);
+ keypad->enable = false;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * db5500_keypad_resume() - resume the keypad controller
+ * @dev: pointer to device structure
+ *
+ * Re-enables the keypad or removes the wakeup configuration,
+ * depending on the device wakeup setting.
+ */
+static int db5500_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(irq);
+ else {
+ if (!keypad->enable) {
+ keypad->enable = true;
+ db5500_mode_enable(keypad, true);
+ }
+ enable_irq(irq);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops db5500_keypad_dev_pm_ops = {
+ .suspend = db5500_keypad_suspend,
+ .resume = db5500_keypad_resume,
+};
+#endif
+
+static struct platform_driver db5500_keypad_driver = {
+ .driver = {
+ .name = "db5500-keypad",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &db5500_keypad_dev_pm_ops,
+#endif
+ },
+ .probe = db5500_keypad_probe,
+ .remove = __devexit_p(db5500_keypad_remove),
+};
+
+/**
+ * db5500_keypad_init() - Initialize the keypad driver
+ *
+ * Registers the db5500 keypad platform driver with the
+ * platform bus.
+ */
+static int __init db5500_keypad_init(void)
+{
+ return platform_driver_register(&db5500_keypad_driver);
+}
+module_init(db5500_keypad_init);
+
+/**
+ * db5500_keypad_exit() - De-initialize the keypad driver
+ *
+ * Unregisters the db5500 keypad platform driver on module
+ * removal.
+ */
+static void __exit db5500_keypad_exit(void)
+{
+ platform_driver_unregister(&db5500_keypad_driver);
+}
+module_exit(db5500_keypad_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_DESCRIPTION("DB5500 Keypad Driver");
+MODULE_ALIAS("platform:db5500-keypad");
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index fcdec5e2b29..66d4c067b98 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -1,4 +1,4 @@
-/*
+/**
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -19,8 +20,10 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <plat/ske.h>
+#include <linux/gpio/nomadik.h>
/* SKE_CR bits */
#define SKE_KPMLT (0x1 << 6)
@@ -48,17 +51,36 @@
#define SKE_ASR3 0x2C
#define SKE_NUM_ASRX_REGISTERS (4)
+#define KEY_PRESSED_DELAY 10
+
+
+#define KEY_REPORTED 1
+#define KEY_PRESSED 2
/**
* struct ske_keypad - data structure used by keypad driver
- * @irq: irq no
- * @reg_base: ske regsiters base address
- * @input: pointer to input device object
- * @board: keypad platform device
- * @keymap: matrix scan code table for keycodes
- * @clk: clock structure pointer
+ * @dev: pointer to the parent device
+ * @irq: irq number
+ * @reg_base: ske registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform data
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ * @enable: flag to enable the driver event
+ * @regulator: pointer to the regulator used by the ske keypad
+ * @gpio_input_irq: array for gpio irqs
+ * @key_pressed: hold the key state
+ * @work: delayed work for switching back to gpio mode
+ * @ske_rows: rows gpio array for ske
+ * @ske_cols: columns gpio array for ske
+ * @gpio_row: gpio row
+ * @gpio_col: gpio column
+ * @gpio_work: delayed work for releasing a gpio-reported key
+ * @keys: matrix holding key status
+ * @scan_work: delayed work for scanning new key actions
*/
struct ske_keypad {
+ struct device *dev;
int irq;
void __iomem *reg_base;
struct input_dev *input;
@@ -66,6 +88,18 @@ struct ske_keypad {
unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
struct clk *clk;
spinlock_t ske_keypad_lock;
+ bool enable;
+ struct regulator *regulator;
+ int gpio_input_irq[SKE_KPD_MAX_ROWS];
+ int key_pressed;
+ struct delayed_work work;
+ int ske_rows[SKE_KPD_MAX_ROWS];
+ int ske_cols[SKE_KPD_MAX_COLS];
+ int gpio_row;
+ int gpio_col;
+ struct delayed_work gpio_work;
+ u8 keys[SKE_KPD_MAX_ROWS][SKE_KPD_MAX_COLS];
+ struct delayed_work scan_work;
};
static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
@@ -83,7 +117,7 @@ static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
spin_unlock(&keypad->ske_keypad_lock);
}
-/*
+/**
* ske_keypad_chip_init: init keypad controller configuration
*
* Enable Multi key press detection, auto scan mode
@@ -91,7 +125,7 @@ static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
{
u32 value;
- int timeout = 50;
+ int timeout = keypad->board->debounce_ms;
/* check SKE_RIS to be 0 */
while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
@@ -100,7 +134,7 @@ static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
if (!timeout)
return -EINVAL;
- /*
+ /**
* set debounce value
* keypad dbounce is configured in DBCR[15:8]
* dbounce value in steps of 32/32.768 ms
@@ -115,7 +149,7 @@ static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
/* enable multi key detection */
ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
- /*
+ /**
* set up the number of columns
* KPCN[5:3] defines no. of keypad columns to be auto scanned
*/
@@ -134,14 +168,121 @@ static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
return 0;
}
+static void ske_mode_enable(struct ske_keypad *keypad, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ dev_dbg(keypad->dev, "%s disable keypad\n", __func__);
+ writel(0, keypad->reg_base + SKE_CR);
+ if (keypad->board->exit)
+ keypad->board->exit();
+ for (i = 0; i < keypad->board->krow; i++) {
+ enable_irq(keypad->gpio_input_irq[i]);
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ } else {
+ dev_dbg(keypad->dev, "%s enable keypad\n", __func__);
+ regulator_enable(keypad->regulator);
+ clk_enable(keypad->clk);
+ for (i = 0; i < keypad->board->krow; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ if (keypad->board->init)
+ keypad->board->init();
+ ske_keypad_chip_init(keypad);
+ }
+}
+static void ske_enable(struct ske_keypad *keypad, bool enable)
+{
+ keypad->enable = enable;
+ if (keypad->enable) {
+ enable_irq(keypad->irq);
+ ske_mode_enable(keypad, true);
+ } else {
+ ske_mode_enable(keypad, false);
+ disable_irq(keypad->irq);
+ }
+}
+
+static ssize_t ske_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", keypad->enable);
+}
+
+static ssize_t ske_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (keypad->enable != val) {
+ keypad->enable = val ? true : false;
+ ske_enable(keypad, keypad->enable);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ ske_show_attr_enable, ske_store_attr_enable);
+
+static struct attribute *ske_keypad_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group ske_attr_group = {
+ .attrs = ske_keypad_attrs,
+};
+
+static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col)
+{
+ int row = 0, code, pos;
+ u32 ske_ris;
+ int num_of_rows;
+
+ /* find out the row */
+ num_of_rows = hweight8(status);
+ do {
+ pos = __ffs(status);
+ row = pos;
+ status &= ~(1 << pos);
+
+ code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
+ ske_ris = readl(keypad->reg_base + SKE_RIS);
+ keypad->key_pressed = ske_ris & SKE_KPRISA;
+
+ dev_dbg(keypad->dev,
+ "%s key_pressed:%d code:%d row:%d col:%d\n",
+ __func__, keypad->key_pressed, code, row, col);
+
+ if (keypad->key_pressed)
+ keypad->keys[row][col] |= KEY_PRESSED;
+
+ num_of_rows--;
+ } while (num_of_rows);
+}
+
static void ske_keypad_read_data(struct ske_keypad *keypad)
{
- struct input_dev *input = keypad->input;
- u16 status;
- int col = 0, row = 0, code;
- int ske_asr, ske_ris, key_pressed, i;
+ u8 status;
+ int col = 0;
+ int ske_asr, i;
- /*
+ /**
* Read the auto scan registers
*
* Each SKE_ASRx (x=0 to x=3) contains two row values.
@@ -153,59 +294,262 @@ static void ske_keypad_read_data(struct ske_keypad *keypad)
if (!ske_asr)
continue;
- /* now that ASRx is zero, find out the column x and row y*/
- if (ske_asr & 0xff) {
+		/* now that ASRx is non-zero, find out the column x and row y */
+ status = ske_asr & 0xff;
+ if (status) {
col = i * 2;
- status = ske_asr & 0xff;
- } else {
+ ske_keypad_report(keypad, status, col);
+ }
+ status = (ske_asr & 0xff00) >> 8;
+ if (status) {
col = (i * 2) + 1;
- status = (ske_asr & 0xff00) >> 8;
+ ske_keypad_report(keypad, status, col);
}
+ }
+}
- /* find out the row */
- row = __ffs(status);
+static void ske_keypad_scan_work(struct work_struct *work)
+{
+ int timeout = 10;
+ int i, j, code;
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, scan_work.work);
+ struct input_dev *input = keypad->input;
- code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
- ske_ris = readl(keypad->reg_base + SKE_RIS);
- key_pressed = ske_ris & SKE_KPRISA;
+ /* Wait for autoscan to complete */
+ while (readl(keypad->reg_base + SKE_CR) & SKE_KPASON)
+ cpu_relax();
+
+ /* SKEx registers are stable and can be read */
+ ske_keypad_read_data(keypad);
+
+ /* Check for key actions */
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ for (j = 0; j < SKE_KPD_MAX_COLS; j++) {
+ switch (keypad->keys[i][j]) {
+ case KEY_REPORTED:
+				/*
+ * Key was reported but is no longer pressed,
+ * report it as released.
+ */
+ code = MATRIX_SCAN_CODE(i, j,
+ SKE_KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code],
+ 0);
+ input_sync(input);
+ keypad->keys[i][j] = 0;
+ dev_dbg(keypad->dev,
+ "%s Key release reported, code:%d\n",
+ __func__, code);
+ break;
+ case KEY_PRESSED:
+ /* Key pressed but not yet reported, report */
+ code = MATRIX_SCAN_CODE(i, j,
+ SKE_KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code],
+ 1);
+ input_sync(input);
+ dev_dbg(keypad->dev,
+ "%s Key press reported, code:%d\n",
+ __func__, code);
+			/* Intentional fall-through */
+ case (KEY_REPORTED | KEY_PRESSED):
+				/*
+ * Key pressed and reported, just reset
+ * KEY_PRESSED for next scan
+ */
+ keypad->keys[i][j] = KEY_REPORTED;
+ break;
+ }
+ }
+ }
+
+ if (keypad->key_pressed) {
+ /* Key still pressed, schedule work to poll changes in 50 ms */
+ schedule_delayed_work(&keypad->scan_work,
+ msecs_to_jiffies(50));
+ } else {
+		/* As a safety measure, clear the interrupt once more */
+ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
+
+ /* Wait for raw interrupt to clear */
+ while ((readl(keypad->reg_base + SKE_RIS) & SKE_KPRISA) &&
+ --timeout) {
+ udelay(10);
+ }
+
+ if (!timeout)
+ dev_err(keypad->dev,
+ "%s Timeed out waiting on irq to clear\n",
+ __func__);
+
+ /* enable auto scan interrupts */
+ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+
+		/*
+ * Schedule the work queue to change it to GPIO mode
+ * if there is no activity in SKE mode
+ */
+ if (!keypad->key_pressed && keypad->enable)
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+}
+
+static void ske_gpio_switch_work(struct work_struct *work)
+{
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, work.work);
+
+ ske_mode_enable(keypad, false);
+ keypad->enable = false;
+}
+
+static void ske_gpio_release_work(struct work_struct *work)
+{
+ int code;
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, gpio_work.work);
+ struct input_dev *input = keypad->input;
+
+ code = MATRIX_SCAN_CODE(keypad->gpio_row, keypad->gpio_col,
+ SKE_KEYPAD_ROW_SHIFT);
+
+ dev_dbg(keypad->dev, "%s Key press reported, code:%d\n",
+ __func__, code);
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], 1);
+ input_sync(input);
+ input_report_key(input, keypad->keymap[code], 0);
+ input_sync(input);
+}
+
+static int ske_read_get_gpio_row(struct ske_keypad *keypad)
+{
+ int row;
+ int value = 0;
+ int ret;
+
+ /* read all rows GPIO data register values */
+ for (row = 0; row < SKE_KPD_MAX_ROWS ; row++) {
+ ret = gpio_get_value(keypad->ske_rows[row]);
+ value += (1 << row) * ret;
+ }
+
+ /* get the exact row */
+ for (row = 0; row < keypad->board->krow; row++) {
+ if (((1 << row) & value) == 0)
+ return row;
+ }
+
+ return -1;
+}
+
+static void ske_set_cols(struct ske_keypad *keypad, int col)
+{
+	int i;
+ int value;
- input_event(input, EV_MSC, MSC_SCAN, code);
- input_report_key(input, keypad->keymap[code], key_pressed);
- input_sync(input);
+	/*
+	 * Drive all column GPIOs high except the requested
+	 * column, which is driven low.
+	 */
+ for (i = 0; i < SKE_KPD_MAX_COLS; i++) {
+ if (i == col)
+ value = 0;
+ else
+ value = 1;
+ gpio_request(keypad->ske_cols[i], "ske-kp");
+ gpio_direction_output(keypad->ske_cols[i], value);
+ gpio_free(keypad->ske_cols[i]);
+ }
+}
+
+static void ske_free_cols(struct ske_keypad *keypad)
+{
+	int i;
+
+ for (i = 0; i < SKE_KPD_MAX_COLS; i++) {
+ gpio_request(keypad->ske_cols[i], "ske-kp");
+ gpio_direction_output(keypad->ske_cols[i], 0);
+ gpio_free(keypad->ske_cols[i]);
+ }
+}
+
+static void ske_manual_scan(struct ske_keypad *keypad)
+{
+ int row;
+ int col;
+
+ for (col = 0; col < keypad->board->kcol; col++) {
+ ske_set_cols(keypad, col);
+ row = ske_read_get_gpio_row(keypad);
+ if (row >= 0) {
+ keypad->key_pressed = 1;
+ keypad->gpio_row = row;
+ keypad->gpio_col = col;
+ break;
+ }
}
+ ske_free_cols(keypad);
}
+static irqreturn_t ske_keypad_gpio_irq(int irq, void *dev_id)
+{
+ struct ske_keypad *keypad = dev_id;
+
+ if (!gpio_get_value(NOMADIK_IRQ_TO_GPIO(irq))) {
+ ske_manual_scan(keypad);
+ if (!keypad->enable) {
+ keypad->enable = true;
+ ske_mode_enable(keypad, true);
+			/*
+ * Schedule the work queue to change it back to GPIO
+ * mode if there is no activity in SKE mode
+ */
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+		/*
+ * Schedule delayed work to report key press if it is not
+ * detected in SKE mode.
+ */
+ if (keypad->key_pressed)
+ schedule_delayed_work(&keypad->gpio_work,
+ KEY_PRESSED_DELAY);
+ }
+
+ return IRQ_HANDLED;
+}
static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
{
struct ske_keypad *keypad = dev_id;
- int retries = 20;
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
/* disable auto scan interrupt; mask the interrupt generated */
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ ske_keypad_set_bits(keypad, SKE_IMSC, SKE_KPIMA, 0x0);
ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
- while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --retries)
- msleep(5);
-
- if (retries) {
- /* SKEx registers are stable and can be read */
- ske_keypad_read_data(keypad);
- }
-
- /* enable auto scan interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+ schedule_delayed_work(&keypad->scan_work, 0);
return IRQ_HANDLED;
}
static int __devinit ske_keypad_probe(struct platform_device *pdev)
{
- const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
struct ske_keypad *keypad;
+ struct resource *res = NULL;
struct input_dev *input;
- struct resource *res;
+ struct clk *clk;
+ void __iomem *reg_base;
+ int ret = 0;
int irq;
- int error;
+ int i;
+ struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
if (!plat) {
dev_err(&pdev->dev, "invalid keypad platform data\n");
@@ -219,42 +563,56 @@ static int __devinit ske_keypad_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ if (res == NULL) {
dev_err(&pdev->dev, "missing platform resources\n");
- return -EINVAL;
+ return -ENXIO;
}
- keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
- input = input_allocate_device();
- if (!keypad || !input) {
- dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ return -EBUSY;
}
- keypad->irq = irq;
- keypad->board = plat;
- keypad->input = input;
- spin_lock_init(&keypad->ske_keypad_lock);
+ reg_base = ioremap(res->start, resource_size(res));
+ if (!reg_base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto out_freerequest_memregions;
+ }
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_mem;
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to clk_get\n");
+ ret = PTR_ERR(clk);
+ goto out_freeioremap;
}
- keypad->reg_base = ioremap(res->start, resource_size(res));
- if (!keypad->reg_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto err_free_mem_region;
+ /* resources are sane; we begin allocation */
+ keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
+	if (!keypad) {
+		dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+		ret = -ENOMEM;
+		goto out_freeclk;
}
+ keypad->dev = &pdev->dev;
- keypad->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_iounmap;
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&pdev->dev, "failed to input_allocate_device\n");
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+ keypad->regulator = regulator_get(&pdev->dev, "v-ape");
+	if (IS_ERR(keypad->regulator)) {
+		dev_err(&pdev->dev, "regulator_get failed\n");
+		ret = PTR_ERR(keypad->regulator);
+		keypad->regulator = NULL;
+		goto out_regulator_get;
+ } else {
+ ret = regulator_enable(keypad->regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "regulator_enable failed\n");
+ goto out_regulator_enable;
+ }
}
input->id.bustype = BUS_HOST;
@@ -266,38 +624,91 @@ static int __devinit ske_keypad_probe(struct platform_device *pdev)
input->keycodemax = ARRAY_SIZE(keypad->keymap);
input_set_capability(input, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input, keypad);
__set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
+ input->keycode, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ keypad->reg_base = reg_base;
+ keypad->clk = clk;
+ INIT_DELAYED_WORK(&keypad->work, ske_gpio_switch_work);
+ INIT_DELAYED_WORK(&keypad->gpio_work, ske_gpio_release_work);
+ INIT_DELAYED_WORK(&keypad->scan_work, ske_keypad_scan_work);
+ /* allocations are sane, we begin HW initialization */
clk_enable(keypad->clk);
- /* go through board initialization helpers */
- if (keypad->board->init)
- keypad->board->init();
+ if (!keypad->board->init) {
+ dev_err(&pdev->dev, "init funtion not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->init() < 0) {
+ dev_err(&pdev->dev, "keyboard init config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (!keypad->board->exit) {
+ dev_err(&pdev->dev, "exit funtion not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
- error = ske_keypad_chip_init(keypad);
- if (error) {
- dev_err(&pdev->dev, "unable to init keypad hardware\n");
- goto err_clk_disable;
+ if (keypad->board->exit() < 0) {
+ dev_err(&pdev->dev, "keyboard exit config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ keypad->ske_rows[i] = *plat->gpio_input_pins;
+ keypad->ske_cols[i] = *plat->gpio_output_pins;
+ keypad->gpio_input_irq[i] =
+ NOMADIK_GPIO_TO_IRQ(keypad->ske_rows[i]);
+ plat->gpio_input_pins++;
+ plat->gpio_output_pins++;
+ }
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ ret = request_threaded_irq(keypad->gpio_input_irq[i],
+ NULL, ske_keypad_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "ske-keypad-gpio", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate gpio irq %d failed\n",
+ keypad->gpio_input_irq[i]);
+ goto out_unregisterinput;
+ }
+ enable_irq_wake(keypad->gpio_input_irq[i]);
}
- error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
- if (error) {
+ ret = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
+ IRQF_ONESHOT, "ske-keypad", keypad);
+ if (ret) {
dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- goto err_clk_disable;
+ goto out_unregisterinput;
}
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- goto err_free_irq;
+	/* sysfs entry to dynamically enable/disable input events */
+ ret = sysfs_create_group(&pdev->dev.kobj, &ske_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs entries\n");
+ goto out_free_irq;
}
if (plat->wakeup_enable)
@@ -305,21 +716,32 @@ static int __devinit ske_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+
return 0;
-err_free_irq:
+out_free_irq:
free_irq(keypad->irq, keypad);
-err_clk_disable:
+out_unregisterinput:
+ input_unregister_device(input);
+ input = NULL;
clk_disable(keypad->clk);
- clk_put(keypad->clk);
-err_iounmap:
- iounmap(keypad->reg_base);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
+out_freeinput:
+ regulator_disable(keypad->regulator);
+out_regulator_enable:
+ regulator_put(keypad->regulator);
+out_regulator_get:
input_free_device(input);
+out_freekeypad:
kfree(keypad);
- return error;
+out_freeclk:
+	clk_put(clk);
+out_freeioremap:
+ iounmap(reg_base);
+out_freerequest_memregions:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
}
static int __devexit ske_keypad_remove(struct platform_device *pdev)
@@ -327,16 +749,22 @@ static int __devexit ske_keypad_remove(struct platform_device *pdev)
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
+ cancel_delayed_work_sync(&keypad->scan_work);
free_irq(keypad->irq, keypad);
input_unregister_device(keypad->input);
+ sysfs_remove_group(&pdev->dev.kobj, &ske_attr_group);
clk_disable(keypad->clk);
clk_put(keypad->clk);
if (keypad->board->exit)
keypad->board->exit();
+ regulator_put(keypad->regulator);
+
iounmap(keypad->reg_base);
release_mem_region(res->start, resource_size(res));
kfree(keypad);
@@ -353,8 +781,16 @@ static int ske_keypad_suspend(struct device *dev)
if (device_may_wakeup(dev))
enable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ else {
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
+ cancel_delayed_work_sync(&keypad->scan_work);
+ disable_irq(irq);
+ if (keypad->enable) {
+ ske_mode_enable(keypad, false);
+ keypad->enable = false;
+ }
+ }
return 0;
}
@@ -367,8 +803,13 @@ static int ske_keypad_resume(struct device *dev)
if (device_may_wakeup(dev))
disable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+ else {
+ if (!keypad->enable) {
+ keypad->enable = true;
+ ske_mode_enable(keypad, true);
+ }
+ enable_irq(irq);
+ }
return 0;
}
@@ -393,7 +834,7 @@ struct platform_driver ske_keypad_driver = {
static int __init ske_keypad_init(void)
{
- return platform_driver_probe(&ske_keypad_driver, ske_keypad_probe);
+ return platform_driver_register(&ske_keypad_driver);
}
module_init(ske_keypad_init);
@@ -404,6 +845,6 @@ static void __exit ske_keypad_exit(void)
module_exit(ske_keypad_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com>");
MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
MODULE_ALIAS("platform:nomadik-ske-keypad");
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ab7610ca10e..c1de4d77ca1 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -108,10 +108,52 @@ struct stmpe_keypad {
unsigned int rows;
unsigned int cols;
+ bool enable;
unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
};
+static ssize_t stmpe_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", keypad->enable);
+}
+
+static ssize_t stmpe_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (keypad->enable != val) {
+ keypad->enable = val;
+ if (!val)
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+ else
+ stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ stmpe_show_attr_enable, stmpe_store_attr_enable);
+
+static struct attribute *stmpe_keypad_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group stmpe_attr_group = {
+ .attrs = stmpe_keypad_attrs,
+};
+
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
{
const struct stmpe_keypad_variant *variant = keypad->variant;
@@ -285,7 +327,7 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
goto out_freekeypad;
}
- input->name = "STMPE keypad";
+ input->name = "STMPE-keypad";
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
@@ -332,10 +374,20 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
goto out_unregisterinput;
}
+	/* sysfs entry to dynamically enable/disable input events */
+ ret = sysfs_create_group(&pdev->dev.kobj, &stmpe_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs entries\n");
+ goto out_free_irq;
+ }
+
+ keypad->enable = true;
platform_set_drvdata(pdev, keypad);
return 0;
+out_free_irq:
+ free_irq(irq, keypad);
out_unregisterinput:
input_unregister_device(input);
input = NULL;
@@ -354,6 +406,7 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+ sysfs_remove_group(&pdev->dev.kobj, &stmpe_attr_group);
free_irq(irq, keypad);
input_unregister_device(keypad->input);
platform_set_drvdata(pdev, NULL);
@@ -362,9 +415,43 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int stmpe_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+
+ if (!device_may_wakeup(stmpe->dev))
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ return 0;
+}
+
+static int stmpe_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+
+ if (!device_may_wakeup(stmpe->dev))
+ stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ return 0;
+}
+
+static const struct dev_pm_ops stmpe_keypad_dev_pm_ops = {
+ .suspend = stmpe_keypad_suspend,
+ .resume = stmpe_keypad_resume,
+};
+#endif
+
static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &stmpe_keypad_dev_pm_ops,
+#endif
.probe = stmpe_keypad_probe,
.remove = __devexit_p(stmpe_keypad_remove),
};
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 22d875fde53..a5ad6408fe6 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,12 +22,26 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AB8500_ACCDET
+ bool "AB8500 AV Accessory detection"
+ depends on AB8500_CORE && AB8500_GPADC && GPIO_AB8500
+ help
+ Say Y here to enable AV accessory detection features for ST-Ericsson's
+ AB8500 Mix-Sig PMIC.
+
+config INPUT_AB5500_ACCDET
+ bool "AB5500 AV Accessory detection"
+ depends on AB5500_CORE && AB5500_GPADC
+ help
+ Say Y here to enable AV accessory detection features for ST-Ericsson's
+ AB5500 Mix-Sig PMIC.
+
config INPUT_AB8500_PONKEY
- tristate "AB8500 Pon (PowerOn) Key"
- depends on AB8500_CORE
+ tristate "AB5500/AB8500 Pon (PowerOn) Key"
+ depends on AB5500_CORE || AB8500_CORE
help
- Say Y here to use the PowerOn Key for ST-Ericsson's AB8500
- Mix-Sig PMIC.
+ Say Y here to use the PowerOn Key for ST-Ericsson's AB5500/AB8500
+ Mix-Sig PMICs.
To compile this driver as a module, choose M here: the module
will be called ab8500-ponkey.
@@ -544,4 +558,14 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
+config INPUT_STE_FF_VIBRA
+ tristate "ST-Ericsson Force Feedback Vibrator"
+ depends on STE_AUDIO_IO_DEV
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for ST-Ericsson's Vibrator which
+ registers as an input force feedback driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called ste_ff_vibra.
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index a244fc6a781..0159d83a7ff 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,8 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AB8500_ACCDET) += abx500-accdet.o ab8500-accdet.o
+obj-$(CONFIG_INPUT_AB5500_ACCDET) += abx500-accdet.o ab5500-accdet.o
obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o
obj-$(CONFIG_INPUT_AD714X) += ad714x.o
obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
@@ -51,3 +53,4 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
+obj-$(CONFIG_INPUT_STE_FF_VIBRA) += ste_ff_vibra.o
diff --git a/drivers/input/misc/ab5500-accdet.c b/drivers/input/misc/ab5500-accdet.c
new file mode 100644
index 00000000000..828ff29cb1e
--- /dev/null
+++ b/drivers/input/misc/ab5500-accdet.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <mach/abx500-accdet.h>
+
+/*
+ * Register definition for accessory detection.
+ */
+#define AB5500_REGU_CTRL1_SPARE_REG 0x84
+#define AB5500_ACC_DET_DB1_REG 0x20
+#define AB5500_ACC_DET_DB2_REG 0x21
+#define AB5500_ACC_DET_CTRL_REG 0x23
+#define AB5500_VDENC_CTRL0 0x80
+
+/* REGISTER: AB5500_ACC_DET_CTRL_REG */
+#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08)
+#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01)
+
+/* REGISTER: AB5500_REGU_CTRL1_SPARE_REG */
+#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01
+
+/* REGISTER: AB5500_IT_SOURCE3_REG */
+#define BIT_ITSOURCE5_ACCDET1 0x02
+
+static struct accessory_irq_descriptor ab5500_irq_desc[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "acc_detedt1db_falling",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "acc_detedt1db_rising",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "acc_detedt21db_falling",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "acc_detedt21db_rising",
+ .isr = button_release_irq_handler,
+ },
+};
+
+static struct accessory_regu_descriptor ab5500_regu_desc[] = {
+ {
+ .id = REGULATOR_VAMIC1,
+ .name = "v-amic",
+ },
+};
+
+
+/*
+ * configures accdet2 input on/off
+ */
+static void ab5500_config_accdetect2_hw(struct abx500_ad *dd, int enable)
+{
+ int ret = 0;
+
+ if (!dd->accdet2_th_set) {
+ /* Configure accdetect21+22 thresholds */
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_DB2_REG,
+ dd->pdata->accdet2122_th);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ goto out;
+ } else {
+ dd->accdet2_th_set = 1;
+ }
+ }
+
+ /* Enable/Disable accdetect21 comparators + pullup */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL2_ENA,
+ enable ? BITS_ACCDETCTRL2_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n",
+ __func__, ret);
+out:
+ return;
+}
+
+/*
+ * configures accdet1 input on/off
+ */
+static void ab5500_config_accdetect1_hw(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ if (!dd->accdet1_th_set) {
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_DB1_REG,
+ dd->pdata->accdet1_dbth);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ else
+ dd->accdet1_th_set = 1;
+ }
+
+ /* enable accdetect1 comparator */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL1_ENA,
+ enable ? BITS_ACCDETCTRL1_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * Returns 1 if some accessory is plugged in, 0 otherwise.
+ */
+static int ab5500_detect_plugged_in(struct abx500_ad *dd)
+{
+ u8 value = 0;
+
+ int status = abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_IT,
+ AB5500_IT_SOURCE3_REG,
+ &value);
+ if (status < 0) {
+ dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n",
+ __func__, status);
+ return 0;
+ }
+
+ if (dd->pdata->is_detection_inverted)
+ return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0;
+ else
+ return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1;
+}
+
+/*
+ * Measures a relatively stable voltage from the specified GPADC input.
+ */
+static int ab5500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int iterations = 2;
+ int v1, v2, dv;
+
+ v1 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ do {
+ msleep(1);
+ --iterations;
+ v2 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ dv = abs(v2 - v1);
+ v1 = v2;
+ } while (iterations > 0 && dv > MAX_VOLT_DIFF);
+
+ return v1;
+}
+
+/*
+ * Configures the HW so that it is possible to decide whether an
+ * accessory is connected or not.
+ */
+static void ab5500_config_hw_test_plug_connected(struct abx500_ad *dd,
+ int enable)
+{
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+	/* enable the mic bias regulator */
+ if (enable)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+}
+
+/*
+ * configures HW so that carkit/headset detection can be accomplished.
+ */
+static void ab5500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable)
+{
+	/* when enabled, turn off the mic bias regulator */
+ if (enable)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+}
+
+static u8 acc_det_ctrl_suspend_val;
+
+static void ab5500_turn_off_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn off AccDetect comparators and pull-up */
+ (void) abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ &acc_det_ctrl_suspend_val);
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ 0);
+}
+
+static void ab5500_turn_on_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn on AccDetect comparators and pull-up */
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ acc_det_ctrl_suspend_val);
+}
+
+static void *ab5500_accdet_abx500_gpadc_get(void)
+{
+ return ab5500_gpadc_get("ab5500-adc.0");
+}
+
+struct abx500_accdet_platform_data *
+ ab5500_get_platform_data(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+struct abx500_ad ab5500_accessory_det_callbacks = {
+ .irq_desc_norm = ab5500_irq_desc,
+ .irq_desc_inverted = NULL,
+ .no_irqs = ARRAY_SIZE(ab5500_irq_desc),
+ .regu_desc = ab5500_regu_desc,
+ .no_of_regu_desc = ARRAY_SIZE(ab5500_regu_desc),
+ .config_accdetect2_hw = ab5500_config_accdetect2_hw,
+ .config_accdetect1_hw = ab5500_config_accdetect1_hw,
+ .detect_plugged_in = ab5500_detect_plugged_in,
+ .meas_voltage_stable = ab5500_meas_voltage_stable,
+ .config_hw_test_basic_carkit = ab5500_config_hw_test_basic_carkit,
+ .turn_off_accdet_comparator = ab5500_turn_off_accdet_comparator,
+ .turn_on_accdet_comparator = ab5500_turn_on_accdet_comparator,
+ .accdet_abx500_gpadc_get = ab5500_accdet_abx500_gpadc_get,
+ .config_hw_test_plug_connected = ab5500_config_hw_test_plug_connected,
+ .set_av_switch = NULL,
+ .get_platform_data = ab5500_get_platform_data,
+};
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ab8500-accdet.c b/drivers/input/misc/ab8500-accdet.c
new file mode 100644
index 00000000000..3cd440a2631
--- /dev/null
+++ b/drivers/input/misc/ab8500-accdet.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500/gpadc.h>
+#include <linux/mfd/ab8500/gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <mach/abx500-accdet.h>
+
+#define MAX_DET_COUNT 10
+#define MAX_VOLT_DIFF 30
+#define MIN_MIC_POWER -100
+
+/* Unique value used to identify Headset button input device */
+#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn"
+#define BTN_INPUT_DEV_NAME "AB8500 Hs Button"
+
+#define DEBOUNCE_PLUG_EVENT_MS 100
+#define DEBOUNCE_PLUG_RETEST_MS 25
+#define DEBOUNCE_UNPLUG_EVENT_MS 0
+
+/*
+ * Register definition for accessory detection.
+ */
+#define AB8500_REGU_CTRL1_SPARE_REG 0x84
+#define AB8500_ACC_DET_DB1_REG 0x80
+#define AB8500_ACC_DET_DB2_REG 0x81
+#define AB8500_ACC_DET_CTRL_REG 0x82
+#define AB8500_IT_SOURCE5_REG 0x04
+
+/* REGISTER: AB8500_ACC_DET_CTRL_REG */
+#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08)
+#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01)
+
+/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */
+#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01
+
+/* REGISTER: AB8500_IT_SOURCE5_REG */
+#define BIT_ITSOURCE5_ACCDET1 0x04
+
+/* After being loaded, how fast the first check is to be made */
+#define INIT_DELAY_MS 3000
+
+/* Voltage limits (mV) for various types of AV Accessories */
+#define ACCESSORY_DET_VOL_DONTCARE -1
+#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0
+#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40
+#define ACCESSORY_CVIDEO_DET_VOL_MIN 41
+#define ACCESSORY_CVIDEO_DET_VOL_MAX 105
+#define ACCESSORY_CARKIT_DET_VOL_MIN 1100
+#define ACCESSORY_CARKIT_DET_VOL_MAX 1300
+#define ACCESSORY_HEADSET_DET_VOL_MIN 0
+#define ACCESSORY_HEADSET_DET_VOL_MAX 200
+#define ACCESSORY_OPENCABLE_DET_VOL_MIN 1730
+#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150
+
+/* Static data initialization */
+
+static struct accessory_regu_descriptor ab8500_regu_desc[3] = {
+ {
+ .id = REGULATOR_VAUDIO,
+ .name = "v-audio",
+ },
+ {
+ .id = REGULATOR_VAMIC1,
+ .name = "v-amic1",
+ },
+ {
+ .id = REGULATOR_AVSWITCH,
+ .name = "vcc-N2158",
+ },
+};
+
+static struct accessory_irq_descriptor ab8500_irq_desc_norm[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "ACC_DETECT_22DB_F",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "ACC_DETECT_22DB_R",
+ .isr = button_release_irq_handler,
+ },
+};
+
+static struct accessory_irq_descriptor ab8500_irq_desc_inverted[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "ACC_DETECT_22DB_R",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "ACC_DETECT_22DB_F",
+ .isr = button_release_irq_handler,
+ },
+};
+
+/*
+ * configures accdet2 input on/off
+ */
+static void ab8500_config_accdetect2_hw(struct abx500_ad *dd, int enable)
+{
+ int ret = 0;
+
+ if (!dd->accdet2_th_set) {
+ /* Configure accdetect21+22 thresholds */
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_DB2_REG,
+ dd->pdata->accdet2122_th);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ goto out;
+ } else {
+ dd->accdet2_th_set = 1;
+ }
+ }
+
+ /* Enable/Disable accdetect21 comparators + pullup */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL2_ENA,
+ enable ? BITS_ACCDETCTRL2_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n",
+ __func__, ret);
+
+out:
+ return;
+}
+
+/*
+ * configures accdet1 input on/off
+ */
+static void ab8500_config_accdetect1_hw(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ if (!dd->accdet1_th_set) {
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_DB1_REG,
+ dd->pdata->accdet1_dbth);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ else
+ dd->accdet1_th_set = 1;
+ }
+
+ /* enable accdetect1 comparator */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL1_ENA,
+ enable ? BITS_ACCDETCTRL1_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * returns the high-level status of whether an accessory is connected (1|0).
+ */
+static int ab8500_detect_plugged_in(struct abx500_ad *dd)
+{
+ u8 value = 0;
+
+ int status = abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_INTERRUPT,
+ AB8500_IT_SOURCE5_REG,
+ &value);
+ if (status < 0) {
+ dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n",
+ __func__, status);
+ return 0;
+ }
+
+ if (dd->pdata->is_detection_inverted)
+ return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0;
+ else
+ return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1;
+}
+
+/*
+ * ab8500_meas_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int iterations = 2;
+ int v1, v2, dv;
+
+ v1 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ do {
+ msleep(1);
+ --iterations;
+ v2 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ dv = abs(v2 - v1);
+ v1 = v2;
+ } while (iterations > 0 && dv > MAX_VOLT_DIFF);
+
+ return v1;
+}
+
+/*
+ * configures HW so that it is possible to decide whether
+ * an accessory is connected or not.
+ */
+static void ab8500_config_hw_test_plug_connected(struct abx500_ad *dd,
+ int enable)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ ret = ab8500_config_pulldown(&dd->pdev->dev,
+ dd->pdata->video_ctrl_gpio, !enable);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+ return;
+ }
+
+ if (enable)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+}
+
+/*
+ * configures HW so that carkit/headset detection can be accomplished.
+ */
+static void ab8500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ if (enable)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+
+ /* Un-Ground the VAMic1 output when enabled */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_REGU_CTRL1,
+ AB8500_REGU_CTRL1_SPARE_REG,
+ BIT_REGUCTRL1SPARE_VAMIC1_GROUND,
+ enable ? BIT_REGUCTRL1SPARE_VAMIC1_GROUND : 0);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * sets the av switch direction - audio-in vs video-out
+ */
+static void ab8500_set_av_switch(struct abx500_ad *dd,
+ enum accessory_avcontrol_dir dir)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (%d)\n", __func__, dir);
+ if (dir == NOT_SET) {
+ ret = gpio_direction_input(dd->pdata->video_ctrl_gpio);
+ dd->gpio35_dir_set = 0;
+ ret = gpio_direction_output(dd->pdata->video_ctrl_gpio, 0);
+ } else if (!dd->gpio35_dir_set) {
+ ret = gpio_direction_output(dd->pdata->video_ctrl_gpio,
+ dir == AUDIO_IN ? 1 : 0);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Output video ctrl signal failed (%d).\n",
+ __func__, ret);
+ } else {
+ dd->gpio35_dir_set = 1;
+ dev_dbg(&dd->pdev->dev, "AV-SWITCH: %s\n",
+ dir == AUDIO_IN ? "AUDIO_IN" : "VIDEO_OUT");
+ }
+ } else {
+ gpio_set_value(dd->pdata->video_ctrl_gpio,
+ dir == AUDIO_IN ? 1 : 0);
+ }
+}
+
+static u8 acc_det_ctrl_suspend_val;
+
+static void ab8500_turn_off_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn off AccDetect comparators and pull-up */
+ (void) abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ &acc_det_ctrl_suspend_val);
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ 0);
+
+}
+
+static void ab8500_turn_on_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn on AccDetect comparators and pull-up */
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ acc_det_ctrl_suspend_val);
+
+}
+
+static void *ab8500_accdet_abx500_gpadc_get(void)
+{
+ return ab8500_gpadc_get();
+}
+
+struct abx500_accdet_platform_data *
+ ab8500_get_platform_data(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *plat;
+
+ plat = dev_get_platdata(pdev->dev.parent);
+
+ if (!plat || !plat->accdet) {
+ dev_err(&pdev->dev, "%s: Failed to get accdet plat data.\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return plat->accdet;
+}
+
+struct abx500_ad ab8500_accessory_det_callbacks = {
+ .irq_desc_norm = ab8500_irq_desc_norm,
+ .irq_desc_inverted = ab8500_irq_desc_inverted,
+ .no_irqs = ARRAY_SIZE(ab8500_irq_desc_norm),
+ .regu_desc = ab8500_regu_desc,
+ .no_of_regu_desc = ARRAY_SIZE(ab8500_regu_desc),
+ .config_accdetect2_hw = ab8500_config_accdetect2_hw,
+ .config_accdetect1_hw = ab8500_config_accdetect1_hw,
+ .detect_plugged_in = ab8500_detect_plugged_in,
+ .meas_voltage_stable = ab8500_meas_voltage_stable,
+ .config_hw_test_basic_carkit = ab8500_config_hw_test_basic_carkit,
+ .turn_off_accdet_comparator = ab8500_turn_off_accdet_comparator,
+ .turn_on_accdet_comparator = ab8500_turn_on_accdet_comparator,
+ .accdet_abx500_gpadc_get = ab8500_accdet_abx500_gpadc_get,
+ .config_hw_test_plug_connected = ab8500_config_hw_test_plug_connected,
+ .set_av_switch = ab8500_set_av_switch,
+ .get_platform_data = ab8500_get_platform_data,
+};
+
+MODULE_DESCRIPTION("AB8500 AV Accessory detection driver");
+MODULE_ALIAS("platform:ab8500-acc-det");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 3d3288a78fd..c1bb2128217 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -6,136 +6,214 @@
*
* AB8500 Power-On Key handler
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+/* Ponkey time control bits */
+#define AB5500_MCB 0x2F
+#define AB5500_PONKEY_10SEC 0x0
+#define AB5500_PONKEY_5SEC 0x1
+#define AB5500_PONKEY_DISABLE 0x2
+#define AB5500_PONKEY_TMR_MASK 0x1
+#define AB5500_PONKEY_TR_MASK 0x2
+
+static int ab5500_ponkey_hw_init(struct platform_device *);
+
+struct ab8500_ponkey_variant {
+ const char *irq_falling;
+ const char *irq_rising;
+ int (*hw_init)(struct platform_device *);
+};
+
+static const struct ab8500_ponkey_variant ab5500_onswa = {
+ .irq_falling = "ONSWAn_falling",
+ .irq_rising = "ONSWAn_rising",
+ .hw_init = ab5500_ponkey_hw_init,
+};
+
+static const struct ab8500_ponkey_variant ab8500_ponkey = {
+ .irq_falling = "ONKEY_DBF",
+ .irq_rising = "ONKEY_DBR",
+};
/**
- * struct ab8500_ponkey - ab8500 ponkey information
+ * struct ab8500_ponkey_info - ab8500 ponkey information
* @input_dev: pointer to input device
- * @ab8500: ab8500 parent
* @irq_dbf: irq number for falling transition
* @irq_dbr: irq number for rising transition
*/
-struct ab8500_ponkey {
+struct ab8500_ponkey_info {
struct input_dev *idev;
- struct ab8500 *ab8500;
int irq_dbf;
int irq_dbr;
};
+static int ab5500_ponkey_hw_init(struct platform_device *pdev)
+{
+ u8 val;
+ struct ab5500_ponkey_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata) {
+ switch (pdata->shutdown_secs) {
+ case 0:
+ val = AB5500_PONKEY_DISABLE;
+ break;
+ case 5:
+ val = AB5500_PONKEY_5SEC;
+ break;
+ case 10:
+ val = AB5500_PONKEY_10SEC;
+ break;
+ default:
+ val = AB5500_PONKEY_10SEC;
+ }
+ } else {
+ val = AB5500_PONKEY_10SEC;
+ }
+ return abx500_mask_and_set(
+ &pdev->dev,
+ AB5500_BANK_STARTUP,
+ AB5500_MCB,
+ AB5500_PONKEY_TMR_MASK | AB5500_PONKEY_TR_MASK,
+ val);
+}
+
/* AB8500 gives us an interrupt when ONKEY is held */
static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
{
- struct ab8500_ponkey *ponkey = data;
+ struct ab8500_ponkey_info *info = data;
- if (irq == ponkey->irq_dbf)
- input_report_key(ponkey->idev, KEY_POWER, true);
- else if (irq == ponkey->irq_dbr)
- input_report_key(ponkey->idev, KEY_POWER, false);
+ if (irq == info->irq_dbf)
+ input_report_key(info->idev, KEY_POWER, true);
+ else if (irq == info->irq_dbr)
+ input_report_key(info->idev, KEY_POWER, false);
- input_sync(ponkey->idev);
+ input_sync(info->idev);
return IRQ_HANDLED;
}
static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_ponkey *ponkey;
- struct input_dev *input;
- int irq_dbf, irq_dbr;
- int error;
+ const struct ab8500_ponkey_variant *variant;
+ struct ab8500_ponkey_info *info;
+ int irq_dbf, irq_dbr, ret;
+
+ variant = (const struct ab8500_ponkey_variant *)
+ pdev->id_entry->driver_data;
+
+ if (variant->hw_init) {
+ ret = variant->hw_init(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init hw\n");
+ return ret;
+ }
+ }
- irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
+ irq_dbf = platform_get_irq_byname(pdev, variant->irq_falling);
if (irq_dbf < 0) {
- dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf);
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ variant->irq_falling, irq_dbf);
return irq_dbf;
}
- irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
+ irq_dbr = platform_get_irq_byname(pdev, variant->irq_rising);
if (irq_dbr < 0) {
- dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr);
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ variant->irq_rising, irq_dbr);
return irq_dbr;
}
- ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
- input = input_allocate_device();
- if (!ponkey || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ info = kzalloc(sizeof(struct ab8500_ponkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- ponkey->idev = input;
- ponkey->ab8500 = ab8500;
- ponkey->irq_dbf = irq_dbf;
- ponkey->irq_dbr = irq_dbr;
+ info->irq_dbf = irq_dbf;
+ info->irq_dbr = irq_dbr;
- input->name = "AB8500 POn(PowerOn) Key";
- input->dev.parent = &pdev->dev;
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(&pdev->dev, "Failed to allocate input dev\n");
+ ret = -ENOMEM;
+ goto out;
+ }
- input_set_capability(input, EV_KEY, KEY_POWER);
+ info->idev->name = "AB8500 POn(PowerOn) Key";
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
- error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbf", ponkey);
- if (error < 0) {
- dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
- ponkey->irq_dbf, error);
- goto err_free_mem;
+ ret = input_register_device(info->idev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
+ goto out_unfreedevice;
}
- error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbr", ponkey);
- if (error < 0) {
- dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
- ponkey->irq_dbr, error);
- goto err_free_dbf_irq;
+ ret = request_threaded_irq(info->irq_dbf, NULL, ab8500_ponkey_handler,
+ IRQF_NO_SUSPEND, "ab8500-ponkey-dbf",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ info->irq_dbf, ret);
+ goto out_unregisterdevice;
}
- error = input_register_device(ponkey->idev);
- if (error) {
- dev_err(ab8500->dev, "Can't register input device: %d\n", error);
- goto err_free_dbr_irq;
+ ret = request_threaded_irq(info->irq_dbr, NULL, ab8500_ponkey_handler,
+ IRQF_NO_SUSPEND, "ab8500-ponkey-dbr",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ info->irq_dbr, ret);
+ goto out_irq_dbf;
}
- platform_set_drvdata(pdev, ponkey);
- return 0;
+ platform_set_drvdata(pdev, info);
-err_free_dbr_irq:
- free_irq(ponkey->irq_dbr, ponkey);
-err_free_dbf_irq:
- free_irq(ponkey->irq_dbf, ponkey);
-err_free_mem:
- input_free_device(input);
- kfree(ponkey);
+ return 0;
- return error;
+out_irq_dbf:
+ free_irq(info->irq_dbf, info);
+out_unregisterdevice:
+ input_unregister_device(info->idev);
+ info->idev = NULL;
+out_unfreedevice:
+ input_free_device(info->idev);
+out:
+ kfree(info);
+ return ret;
}
static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
{
- struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
-
- free_irq(ponkey->irq_dbf, ponkey);
- free_irq(ponkey->irq_dbr, ponkey);
- input_unregister_device(ponkey->idev);
- kfree(ponkey);
-
- platform_set_drvdata(pdev, NULL);
+ struct ab8500_ponkey_info *info = platform_get_drvdata(pdev);
+ free_irq(info->irq_dbf, info);
+ free_irq(info->irq_dbr, info);
+ input_unregister_device(info->idev);
+ kfree(info);
return 0;
}
+static struct platform_device_id ab8500_ponkey_id_table[] = {
+ { "ab5500-onswa", (kernel_ulong_t)&ab5500_onswa, },
+ { "ab8500-poweron-key", (kernel_ulong_t)&ab8500_ponkey, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, ab8500_ponkey_id_table);
+
static struct platform_driver ab8500_ponkey_driver = {
.driver = {
.name = "ab8500-poweron-key",
.owner = THIS_MODULE,
},
+ .id_table = ab8500_ponkey_id_table,
.probe = ab8500_ponkey_probe,
.remove = __devexit_p(ab8500_ponkey_remove),
};
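For context, the new "ab5500-onswa" entry in ab8500_ponkey_id_table[] is matched by device name, and its shutdown_secs platform data is consumed by ab5500_ponkey_hw_init() above. A minimal board-code sketch, assuming purely for illustration that the device is instantiated directly rather than as an MFD cell, and that ab5500_ponkey_platform_data (declared in the ab5500 header) has no members beyond shutdown_secs:

#include <linux/platform_device.h>
#include <linux/mfd/abx500/ab5500.h>	/* assumed home of ab5500_ponkey_platform_data */

static struct ab5500_ponkey_platform_data ab5500_ponkey_pdata = {
	.shutdown_secs	= 5,	/* request the 5 s long-press shutdown timer */
};

static struct platform_device ab5500_onswa_device = {
	.name	= "ab5500-onswa",	/* matches ab8500_ponkey_id_table[] */
	.id	= -1,
	.dev	= {
		.platform_data = &ab5500_ponkey_pdata,
	},
};

/* registered from board init code with platform_device_register(&ab5500_onswa_device); */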
diff --git a/drivers/input/misc/abx500-accdet.c b/drivers/input/misc/abx500-accdet.c
new file mode 100644
index 00000000000..67403e5ae5d
--- /dev/null
+++ b/drivers/input/misc/abx500-accdet.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/mfd/abx500.h>
+#include <linux/interrupt.h>
+#include <sound/jack.h>
+#include <mach/abx500-accdet.h>
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+#include <sound/ux500_ab8500.h>
+#else
+#define ux500_ab8500_jack_report(i)
+#endif
+
+/* Unique value used to identify Headset button input device */
+#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn"
+#define BTN_INPUT_DEV_NAME "AB8500 Hs Button"
+
+#define DEBOUNCE_PLUG_EVENT_MS 100
+#define DEBOUNCE_PLUG_RETEST_MS 25
+#define DEBOUNCE_UNPLUG_EVENT_MS 0
+
+/* After being loaded, how fast the first check is to be made */
+#define INIT_DELAY_MS 3000
+
+/* Voltage limits (mV) for various types of AV Accessories */
+#define ACCESSORY_DET_VOL_DONTCARE -1
+#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0
+#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40
+#define ACCESSORY_CVIDEO_DET_VOL_MIN 41
+#define ACCESSORY_CVIDEO_DET_VOL_MAX 105
+#define ACCESSORY_CARKIT_DET_VOL_MIN 1100
+#define ACCESSORY_CARKIT_DET_VOL_MAX 1300
+#define ACCESSORY_HEADSET_DET_VOL_MIN 0
+#define ACCESSORY_HEADSET_DET_VOL_MAX 200
+#define ACCESSORY_OPENCABLE_DET_VOL_MIN 1730
+#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150
+
+
+/* Macros */
+
+/*
+ * Convenience macros to check jack characteristics.
+ */
+#define jack_supports_mic(type) \
+ (type == JACK_TYPE_HEADSET || type == JACK_TYPE_CARKIT)
+#define jack_supports_spkr(type) \
+ ((type != JACK_TYPE_DISCONNECTED) && (type != JACK_TYPE_CONNECTED))
+#define jack_supports_buttons(type) \
+ ((type == JACK_TYPE_HEADSET) ||\
+ (type == JACK_TYPE_CARKIT) ||\
+ (type == JACK_TYPE_OPENCABLE) ||\
+ (type == JACK_TYPE_CONNECTED))
+
+
+/* Forward declarations */
+static void config_accdetect(struct abx500_ad *dd);
+static enum accessory_jack_type detect(struct abx500_ad *dd, int *required_det);
+
+/* Static data initialization */
+static struct accessory_detect_task detect_ops[] = {
+ {
+ .type = JACK_TYPE_DISCONNECTED,
+ .typename = "DISCONNECTED",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_HEADPHONE,
+ .typename = "HEADPHONE",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_HEADPHONE_DET_VOL_MIN,
+ .maxvol = ACCESSORY_HEADPHONE_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_CVIDEO,
+ .typename = "CVIDEO",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_CVIDEO_DET_VOL_MIN,
+ .maxvol = ACCESSORY_CVIDEO_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_OPENCABLE,
+ .typename = "OPENCABLE",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_OPENCABLE_DET_VOL_MIN,
+ .maxvol = ACCESSORY_OPENCABLE_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_CARKIT,
+ .typename = "CARKIT",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_CARKIT_DET_VOL_MIN,
+ .maxvol = ACCESSORY_CARKIT_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_HEADSET,
+ .typename = "HEADSET",
+ .meas_mv = 0,
+ .req_det_count = 2,
+ .minvol = ACCESSORY_HEADSET_DET_VOL_MIN,
+ .maxvol = ACCESSORY_HEADSET_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_CONNECTED,
+ .typename = "CONNECTED",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .maxvol = ACCESSORY_DET_VOL_DONTCARE
+ }
+};
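The detection below is table-driven: detect() walks detect_ops[] in declaration order and stops at the first entry whose plug test or voltage window matches, so narrower windows such as HEADPHONE (0-40 mV) are tried before the broader HEADSET range (0-200 mV). A standalone sketch of that first-match rule, using a subset of the table above with a hypothetical reading, purely for illustration:

#include <stdio.h>

struct window { const char *name; int min_mv; int max_mv; };

/* Same relative declaration order as detect_ops[] above (subset only) */
static const struct window wins[] = {
	{ "HEADPHONE",  0,  40 },
	{ "CVIDEO",    41, 105 },
	{ "HEADSET",    0, 200 },	/* broad window, checked last */
};

int main(void)
{
	int mv = 30;	/* hypothetical GPADC reading in mV */
	size_t i;

	for (i = 0; i < sizeof(wins) / sizeof(wins[0]); i++) {
		if (mv >= wins[i].min_mv && mv <= wins[i].max_mv) {
			/* prints "30 mV -> HEADPHONE", not HEADSET */
			printf("%d mV -> %s\n", mv, wins[i].name);
			break;
		}
	}
	return 0;
}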
+
+static struct accessory_irq_descriptor *abx500_accdet_irq_desc;
+
+/*
+ * textual representation of the accessory type
+ */
+static const char *accessory_str(enum accessory_jack_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(detect_ops); i++)
+ if (type == detect_ops[i].type)
+ return detect_ops[i].typename;
+
+ return "UNKNOWN?";
+}
+
+/*
+ * enables regulator but only if it has not been enabled earlier.
+ */
+void accessory_regulator_enable(struct abx500_ad *dd,
+ enum accessory_regulator reg)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (reg & dd->regu_desc[i].id) {
+ if (!dd->regu_desc[i].enabled) {
+ if (!regulator_enable(dd->regu_desc[i].handle))
+ dd->regu_desc[i].enabled = 1;
+ }
+ }
+ }
+}
+
+/*
+ * disables regulator but only if it has been previously enabled.
+ */
+void accessory_regulator_disable(struct abx500_ad *dd,
+ enum accessory_regulator reg)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (reg & dd->regu_desc[i].id) {
+ if (dd->regu_desc[i].enabled) {
+ if (!regulator_disable(dd->regu_desc[i].handle))
+ dd->regu_desc[i].enabled = 0;
+ }
+ }
+ }
+}
+
+/*
+ * frees previously retrieved regulators.
+ */
+static void free_regulators(struct abx500_ad *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (dd->regu_desc[i].handle) {
+ regulator_put(dd->regu_desc[i].handle);
+ dd->regu_desc[i].handle = NULL;
+ }
+ }
+}
+
+/*
+ * gets required regulators.
+ */
+static int create_regulators(struct abx500_ad *dd)
+{
+ int i;
+ int status = 0;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ struct regulator *regu =
+ regulator_get(&dd->pdev->dev, dd->regu_desc[i].name);
+ if (IS_ERR(regu)) {
+ status = PTR_ERR(regu);
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get supply '%s' (%d).\n",
+ __func__, dd->regu_desc[i].name, status);
+ free_regulators(dd);
+ goto out;
+ } else {
+ dd->regu_desc[i].handle = regu;
+ }
+ }
+
+out:
+ return status;
+}
+
+/*
+ * create input device for button press reporting
+ */
+static int create_btn_input_dev(struct abx500_ad *dd)
+{
+ int err;
+
+ dd->btn_input_dev = input_allocate_device();
+ if (!dd->btn_input_dev) {
+ dev_err(&dd->pdev->dev, "%s: Failed to allocate input dev.\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ input_set_capability(dd->btn_input_dev,
+ EV_KEY,
+ dd->pdata->btn_keycode);
+
+ dd->btn_input_dev->name = BTN_INPUT_DEV_NAME;
+ dd->btn_input_dev->uniq = BTN_INPUT_UNIQUE_VALUE;
+ dd->btn_input_dev->dev.parent = &dd->pdev->dev;
+
+ err = input_register_device(dd->btn_input_dev);
+ if (err) {
+ dev_err(&dd->pdev->dev,
+ "%s: register_input_device failed (%d).\n", __func__,
+ err);
+ input_free_device(dd->btn_input_dev);
+ dd->btn_input_dev = NULL;
+ goto out;
+ }
+out:
+ return err;
+}
+
+/*
+ * reports jack status
+ */
+void report_jack_status(struct abx500_ad *dd)
+{
+ int value = 0;
+
+ /* Never report possible open cable */
+ if (dd->jack_type == JACK_TYPE_OPENCABLE)
+ goto out;
+
+ /* Never report same state twice in a row */
+ if (dd->jack_type == dd->reported_jack_type)
+ goto out;
+ dd->reported_jack_type = dd->jack_type;
+
+ dev_info(&dd->pdev->dev, "Accessory: %s\n",
+ accessory_str(dd->jack_type));
+
+ if (dd->jack_type != JACK_TYPE_DISCONNECTED &&
+ dd->jack_type != JACK_TYPE_UNSPECIFIED)
+ value |= SND_JACK_MECHANICAL;
+ if (jack_supports_mic(dd->jack_type))
+ value |= SND_JACK_MICROPHONE;
+ if (jack_supports_spkr(dd->jack_type))
+ value |= (SND_JACK_HEADPHONE | SND_JACK_LINEOUT);
+ if (dd->jack_type == JACK_TYPE_CVIDEO) {
+ value |= SND_JACK_VIDEOOUT;
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, VIDEO_OUT);
+ }
+ ux500_ab8500_jack_report(value);
+
+out: return;
+}
+
+/*
+ * worker routine to handle accessory unplug case
+ */
+void unplug_irq_handler_work(struct work_struct *work)
+{
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, unplug_irq_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ dd->jack_type = dd->jack_type_temp = JACK_TYPE_DISCONNECTED;
+ dd->jack_det_count = dd->total_jack_det_count = 0;
+ dd->btn_state = BUTTON_UNK;
+ config_accdetect(dd);
+
+ accessory_regulator_disable(dd, REGULATOR_ALL);
+
+ report_jack_status(dd);
+}
+
+/*
+ * interrupt service routine for accessory unplug.
+ */
+irqreturn_t unplug_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work,
+ msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt service routine for accessory plug.
+ */
+irqreturn_t plug_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n",
+ __func__, irq);
+
+ switch (dd->jack_type) {
+ case JACK_TYPE_DISCONNECTED:
+ case JACK_TYPE_UNSPECIFIED:
+ queue_delayed_work(dd->irq_work_queue, &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS));
+ break;
+
+ default:
+ dev_err(&dd->pdev->dev, "%s: Unexpected plug IRQ\n", __func__);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * worker routine to perform detection.
+ */
+static void detect_work(struct work_struct *work)
+{
+ int req_det_count = 1;
+ enum accessory_jack_type new_type;
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, detect_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, AUDIO_IN);
+
+ new_type = detect(dd, &req_det_count);
+
+ dd->total_jack_det_count++;
+ if (dd->jack_type_temp == new_type) {
+ dd->jack_det_count++;
+ } else {
+ dd->jack_det_count = 1;
+ dd->jack_type_temp = new_type;
+ }
+
+ if (dd->total_jack_det_count >= MAX_DET_COUNT) {
+ dev_err(&dd->pdev->dev,
+ "%s: MAX_DET_COUNT(=%d) reached. Bailing out.\n",
+ __func__, MAX_DET_COUNT);
+ queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work,
+ msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS));
+ } else if (dd->jack_det_count >= req_det_count) {
+ dd->total_jack_det_count = dd->jack_det_count = 0;
+ dd->jack_type = new_type;
+ dd->detect_jiffies = jiffies;
+ report_jack_status(dd);
+ config_accdetect(dd);
+ } else {
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_RETEST_MS));
+ }
+}
+
+/*
+ * reports a button event (pressed, released).
+ */
+static void report_btn_event(struct abx500_ad *dd, int down)
+{
+ input_report_key(dd->btn_input_dev, dd->pdata->btn_keycode, down);
+ input_sync(dd->btn_input_dev);
+
+ dev_dbg(&dd->pdev->dev, "HS-BTN: %s\n", down ? "PRESSED" : "RELEASED");
+}
+
+/*
+ * interrupt service routine invoked when hs button is pressed down.
+ */
+irqreturn_t button_press_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ unsigned long accept_jiffies = dd->detect_jiffies +
+ msecs_to_jiffies(1000);
+ if (time_before(jiffies, accept_jiffies)) {
+ dev_dbg(&dd->pdev->dev, "%s: Skipped spurious btn press.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ if (dd->jack_type == JACK_TYPE_OPENCABLE) {
+ /* Something got connected to the open cable -> detect. */
+ dd->config_accdetect2_hw(dd, 0);
+ queue_delayed_work(dd->irq_work_queue, &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS));
+ return IRQ_HANDLED;
+ }
+
+ if (dd->btn_state == BUTTON_PRESSED)
+ return IRQ_HANDLED;
+
+ if (jack_supports_buttons(dd->jack_type)) {
+ dd->btn_state = BUTTON_PRESSED;
+ report_btn_event(dd, 1);
+ } else {
+ dd->btn_state = BUTTON_UNK;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt service routine invoked when hs button is released.
+ */
+irqreturn_t button_release_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ if (dd->jack_type == JACK_TYPE_OPENCABLE)
+ return IRQ_HANDLED;
+
+ if (dd->btn_state != BUTTON_PRESSED)
+ return IRQ_HANDLED;
+
+ if (jack_supports_buttons(dd->jack_type)) {
+ report_btn_event(dd, 0);
+ dd->btn_state = BUTTON_RELEASED;
+ } else {
+ dd->btn_state = BUTTON_UNK;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * checks whether the measured voltage is in the given range. Depending on the
+ * arguments, the voltage is either re-measured or the previous reading is reused.
+ */
+static int mic_vol_in_range(struct abx500_ad *dd,
+ int lo, int hi, int force_read)
+{
+ static int mv = MIN_MIC_POWER;
+
+ if (mv == -100 || force_read)
+ mv = dd->meas_voltage_stable(dd);
+
+ return (mv >= lo && mv <= hi) ? 1 : 0;
+}
+
+/*
+ * checks whether the currently connected HW is of given type.
+ */
+static int detect_hw(struct abx500_ad *dd,
+ struct accessory_detect_task *task)
+{
+ int status;
+
+ switch (task->type) {
+ case JACK_TYPE_DISCONNECTED:
+ dd->config_hw_test_plug_connected(dd, 1);
+ status = !dd->detect_plugged_in(dd);
+ break;
+ case JACK_TYPE_CONNECTED:
+ dd->config_hw_test_plug_connected(dd, 1);
+ status = dd->detect_plugged_in(dd);
+ break;
+ case JACK_TYPE_CARKIT:
+ dd->config_hw_test_basic_carkit(dd, 1);
+ /* fall through */
+ case JACK_TYPE_HEADPHONE:
+ case JACK_TYPE_CVIDEO:
+ case JACK_TYPE_HEADSET:
+ case JACK_TYPE_OPENCABLE:
+ status = mic_vol_in_range(dd,
+ task->minvol,
+ task->maxvol,
+ task->meas_mv);
+ break;
+ default:
+ status = 0;
+ }
+
+ return status;
+}
+
+/*
+ * Tries to detect the currently attached accessory
+ */
+static enum accessory_jack_type detect(struct abx500_ad *dd,
+ int *req_det_count)
+{
+ enum accessory_jack_type type = JACK_TYPE_DISCONNECTED;
+ int i;
+
+ accessory_regulator_enable(dd, REGULATOR_VAUDIO | REGULATOR_AVSWITCH);
+
+ for (i = 0; i < ARRAY_SIZE(detect_ops); ++i) {
+ if (detect_hw(dd, &detect_ops[i])) {
+ type = detect_ops[i].type;
+ *req_det_count = detect_ops[i].req_det_count;
+ break;
+ }
+ }
+
+ dd->config_hw_test_basic_carkit(dd, 0);
+ dd->config_hw_test_plug_connected(dd, 0);
+
+ if (jack_supports_buttons(type))
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+ else
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1 |
+ REGULATOR_AVSWITCH);
+
+ accessory_regulator_disable(dd, REGULATOR_VAUDIO);
+
+ return type;
+}
+
+/*
+ * registers the specified interrupt
+ */
+static void claim_irq(struct abx500_ad *dd, enum accessory_irq irq_id)
+{
+ int ret;
+ int irq;
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ if (abx500_accdet_irq_desc[irq_id].registered)
+ return;
+
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+ if (irq < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get irq %s\n", __func__,
+ abx500_accdet_irq_desc[irq_id].name);
+ return;
+ }
+
+ ret = request_threaded_irq(irq,
+ NULL,
+ abx500_accdet_irq_desc[irq_id].isr,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ abx500_accdet_irq_desc[irq_id].name,
+ dd);
+ if (ret != 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to claim irq %s (%d)\n",
+ __func__,
+ abx500_accdet_irq_desc[irq_id].name,
+ ret);
+ } else {
+ abx500_accdet_irq_desc[irq_id].registered = 1;
+ dev_dbg(&dd->pdev->dev, "%s: %s\n",
+ __func__, abx500_accdet_irq_desc[irq_id].name);
+ }
+}
+
+/*
+ * releases the specified interrupt
+ */
+static void release_irq(struct abx500_ad *dd, enum accessory_irq irq_id)
+{
+ int irq;
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ if (!abx500_accdet_irq_desc[irq_id].registered)
+ return;
+
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+ if (irq < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get irq %s (%d)\n",
+ __func__,
+ abx500_accdet_irq_desc[irq_id].name, irq);
+ } else {
+ free_irq(irq, dd);
+ abx500_accdet_irq_desc[irq_id].registered = 0;
+ dev_dbg(&dd->pdev->dev, "%s: %s\n",
+ __func__, abx500_accdet_irq_desc[irq_id].name);
+ }
+}
+
+/*
+ * configures interrupts + detection hardware to meet the requirements
+ * set by the currently attached accessory type.
+ */
+static void config_accdetect(struct abx500_ad *dd)
+{
+ switch (dd->jack_type) {
+ case JACK_TYPE_UNSPECIFIED:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 0);
+
+ release_irq(dd, PLUG_IRQ);
+ release_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ break;
+
+ case JACK_TYPE_DISCONNECTED:
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ case JACK_TYPE_HEADPHONE:
+ case JACK_TYPE_CVIDEO:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 0);
+
+ claim_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ break;
+
+ case JACK_TYPE_CONNECTED:
+ case JACK_TYPE_HEADSET:
+ case JACK_TYPE_CARKIT:
+ case JACK_TYPE_OPENCABLE:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 1);
+
+ release_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ claim_irq(dd, BUTTON_PRESS_IRQ);
+ claim_irq(dd, BUTTON_RELEASE_IRQ);
+ break;
+
+ default:
+ dev_err(&dd->pdev->dev, "%s: Unknown type: %d\n",
+ __func__, dd->jack_type);
+ }
+}
+
+/*
+ * worker routine performing the deferred initialization.
+ */
+static void init_work(struct work_struct *work)
+{
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, init_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ dd->jack_type = dd->reported_jack_type = JACK_TYPE_UNSPECIFIED;
+ config_accdetect(dd);
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->detect_work,
+ msecs_to_jiffies(0));
+}
+
+/*
+ * performs platform device initialization
+ */
+static int abx500_accessory_init(struct platform_device *pdev)
+{
+ int ret;
+ struct abx500_ad *dd = (struct abx500_ad *)pdev->id_entry->driver_data;
+
+ dev_dbg(&pdev->dev, "Enter: %s\n", __func__);
+
+ dd->pdev = pdev;
+ dd->pdata = dd->get_platform_data(pdev);
+ if (IS_ERR(dd->pdata))
+ return PTR_ERR(dd->pdata);
+
+ if (dd->pdata->video_ctrl_gpio) {
+ ret = gpio_is_valid(dd->pdata->video_ctrl_gpio);
+ if (!ret) {
+ dev_err(&pdev->dev,
+ "%s: Video ctrl GPIO invalid (%d).\n", __func__,
+ dd->pdata->video_ctrl_gpio);
+
+ return -EINVAL;
+ }
+ ret = gpio_request(dd->pdata->video_ctrl_gpio,
+ "Video Control");
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Get video ctrl GPIO"
+ " failed.\n", __func__);
+ return ret;
+ }
+ }
+
+ ret = create_btn_input_dev(dd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: create_button_input_dev failed.\n",
+ __func__);
+ goto fail_no_btn_input_dev;
+ }
+
+ ret = create_regulators(dd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to create regulators\n",
+ __func__);
+ goto fail_no_regulators;
+ }
+ dd->btn_state = BUTTON_UNK;
+
+ dd->irq_work_queue = create_singlethread_workqueue("abx500_accdet_wq");
+ if (!dd->irq_work_queue) {
+ dev_err(&pdev->dev, "%s: Failed to create wq\n", __func__);
+ ret = -ENOMEM;
+ goto fail_no_mem_for_wq;
+ }
+
+ dd->gpadc = dd->accdet_abx500_gpadc_get();
+
+ INIT_DELAYED_WORK(&dd->detect_work, detect_work);
+ INIT_DELAYED_WORK(&dd->unplug_irq_work, unplug_irq_handler_work);
+ INIT_DELAYED_WORK(&dd->init_work, init_work);
+
+ /* Defer init/detect since the information is of no use early in boot */
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->init_work,
+ msecs_to_jiffies(INIT_DELAY_MS));
+
+ platform_set_drvdata(pdev, dd);
+
+ return 0;
+fail_no_mem_for_wq:
+ free_regulators(dd);
+fail_no_regulators:
+ input_unregister_device(dd->btn_input_dev);
+fail_no_btn_input_dev:
+ if (dd->pdata->video_ctrl_gpio)
+ gpio_free(dd->pdata->video_ctrl_gpio);
+ return ret;
+}
+
+/*
+ * Performs platform device cleanup
+ */
+static void abx500_accessory_cleanup(struct abx500_ad *dd)
+{
+ dev_dbg(&dd->pdev->dev, "Enter: %s\n", __func__);
+
+ dd->jack_type = JACK_TYPE_UNSPECIFIED;
+ config_accdetect(dd);
+
+ gpio_free(dd->pdata->video_ctrl_gpio);
+ input_unregister_device(dd->btn_input_dev);
+ free_regulators(dd);
+
+ cancel_delayed_work(&dd->detect_work);
+ cancel_delayed_work(&dd->unplug_irq_work);
+ cancel_delayed_work(&dd->init_work);
+ flush_workqueue(dd->irq_work_queue);
+ destroy_workqueue(dd->irq_work_queue);
+
+ kfree(dd);
+}
+
+static int __devinit abx500_acc_detect_probe(struct platform_device *pdev)
+{
+
+ return abx500_accessory_init(pdev);
+}
+
+static int __devexit abx500_acc_detect_remove(struct platform_device *pdev)
+{
+ abx500_accessory_cleanup(platform_get_drvdata(pdev));
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int abx500_acc_detect_suspend(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+ int irq_id, irq;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ cancel_delayed_work_sync(&dd->unplug_irq_work);
+ cancel_delayed_work_sync(&dd->detect_work);
+ cancel_delayed_work_sync(&dd->init_work);
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) {
+ if (abx500_accdet_irq_desc[irq_id].registered == 1) {
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+
+ disable_irq(irq);
+ }
+ }
+
+ dd->turn_off_accdet_comparator(pdev);
+
+ if (dd->jack_type == JACK_TYPE_HEADSET)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+
+ return 0;
+}
+
+static int abx500_acc_detect_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+ int irq_id, irq;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ if (dd->jack_type == JACK_TYPE_HEADSET)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+
+ dd->turn_on_accdet_comparator(pdev);
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) {
+ if (abx500_accdet_irq_desc[irq_id].registered == 1) {
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+
+ enable_irq(irq);
+
+ }
+ }
+
+ /* After resume, reinitialize */
+ dd->gpio35_dir_set = dd->accdet1_th_set = dd->accdet2_th_set = 0;
+ queue_delayed_work(dd->irq_work_queue, &dd->init_work, 0);
+
+ return 0;
+}
+#else
+#define abx500_acc_detect_suspend NULL
+#define abx500_acc_detect_resume NULL
+#endif
+
+static struct platform_device_id abx500_accdet_ids[] = {
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+ { "ab5500-acc-det", (kernel_ulong_t)&ab5500_accessory_det_callbacks, },
+#endif
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ { "ab8500-acc-det", (kernel_ulong_t)&ab8500_accessory_det_callbacks, },
+#endif
+ { },
+};
+
+static const struct dev_pm_ops abx_ops = {
+ .suspend = abx500_acc_detect_suspend,
+ .resume = abx500_acc_detect_resume,
+};
+
+static struct platform_driver abx500_acc_detect_platform_driver = {
+ .driver = {
+ .name = "abx500-acc-det",
+ .owner = THIS_MODULE,
+ .pm = &abx_ops,
+ },
+ .probe = abx500_acc_detect_probe,
+ .id_table = abx500_accdet_ids,
+ .remove = __devexit_p(abx500_acc_detect_remove),
+};
+
+static int __init abx500_acc_detect_init(void)
+{
+ return platform_driver_register(&abx500_acc_detect_platform_driver);
+}
+
+static void __exit abx500_acc_detect_exit(void)
+{
+ platform_driver_unregister(&abx500_acc_detect_platform_driver);
+}
+
+module_init(abx500_acc_detect_init);
+module_exit(abx500_acc_detect_exit);
+
+MODULE_DESCRIPTION("ABx500 AV Accessory detection driver");
+MODULE_ALIAS("platform:abx500-acc-det");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ste_ff_vibra.c b/drivers/input/misc/ste_ff_vibra.c
new file mode 100644
index 00000000000..9038e6be046
--- /dev/null
+++ b/drivers/input/misc/ste_ff_vibra.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>
+ * for ST-Ericsson
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/ste_audio_io_vibrator.h>
+
+#define FF_VIBRA_DOWN 0x0000 /* 0 degrees */
+#define FF_VIBRA_LEFT 0x4000 /* 90 degrees */
+#define FF_VIBRA_UP 0x8000 /* 180 degrees */
+#define FF_VIBRA_RIGHT 0xC000 /* 270 degrees */
+
+/**
+ * struct vibra_info - Vibrator information structure
+ * @idev: Pointer to input device structure
+ * @vibra_workqueue: Pointer to vibrator workqueue structure
+ * @vibra_work: Vibrator work
+ * @direction: Vibration direction
+ * @speed: Vibration speed
+ *
+ * Structure vibra_info holds vibrator information
+ **/
+struct vibra_info {
+ struct input_dev *idev;
+ struct workqueue_struct *vibra_workqueue;
+ struct work_struct vibra_work;
+ int direction;
+ unsigned char speed;
+};
+
+/**
+ * vibra_play_work() - Vibrator work, sets speed and direction
+ * @work: Pointer to work structure
+ *
+ * This function is called from the workqueue and turns the vibrators on/off
+ **/
+static void vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *vinfo = container_of(work,
+ struct vibra_info, vibra_work);
+ struct ste_vibra_speed left_speed = {
+ .positive = 0,
+ .negative = 0,
+ };
+ struct ste_vibra_speed right_speed = {
+ .positive = 0,
+ .negative = 0,
+ };
+
+ /* Divide by 2 because the range supported by the PWM is 0-100 */
+ vinfo->speed /= 2;
+
+ if ((vinfo->direction > FF_VIBRA_DOWN) &&
+ (vinfo->direction < FF_VIBRA_UP)) {
+ /* 1 - 179 degrees, turn on left vibrator */
+ left_speed.positive = vinfo->speed;
+ } else if (vinfo->direction > FF_VIBRA_UP) {
+ /* more than 180 degrees, turn on right vibrator */
+ right_speed.positive = vinfo->speed;
+ } else {
+ /* 0 (down) or 180 (up) degrees, turn on 2 vibrators */
+ left_speed.positive = vinfo->speed;
+ right_speed.positive = vinfo->speed;
+ }
+
+ ste_audioio_vibrator_pwm_control(STE_AUDIOIO_CLIENT_FF_VIBRA,
+ left_speed, right_speed);
+}
+
+/**
+ * vibra_play() - Memless device control function
+ * @idev: Pointer to input device structure
+ * @data: Pointer to private data (not used)
+ * @effect: Pointer to force feedback effect structure
+ *
+ * This function controls the memless force-feedback device
+ *
+ * Returns:
+ * 0 - success
+ **/
+static int vibra_play(struct input_dev *idev, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ vinfo->direction = effect->direction;
+ vinfo->speed = effect->u.rumble.strong_magnitude >> 8;
+ if (!vinfo->speed)
+ /* Shift weak magnitude to make it perceptible on the vibrator */
+ vinfo->speed = effect->u.rumble.weak_magnitude >> 9;
+
+ queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work);
+
+ return 0;
+}
+
+/**
+ * ste_ff_vibra_open() - Input device open function
+ * @idev: Pointer to input device structure
+ *
+ * This function is called on opening input device
+ *
+ * Returns:
+ * -ENOMEM - no memory left
+ * 0 - success
+ **/
+static int ste_ff_vibra_open(struct input_dev *idev)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ vinfo->vibra_workqueue =
+ create_singlethread_workqueue("ste_ff-ff-vibra");
+ if (!vinfo->vibra_workqueue) {
+ dev_err(&idev->dev, "couldn't create vibra workqueue\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * ste_ff_vibra_close() - Input device close function
+ * @idev: Pointer to input device structure
+ *
+ * This function is called on closing input device
+ **/
+static void ste_ff_vibra_close(struct input_dev *idev)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ cancel_work_sync(&vinfo->vibra_work);
+ INIT_WORK(&vinfo->vibra_work, vibra_play_work);
+ destroy_workqueue(vinfo->vibra_workqueue);
+ vinfo->vibra_workqueue = NULL;
+}
+
+static int __devinit ste_ff_vibra_probe(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo;
+ int ret;
+
+ vinfo = kmalloc(sizeof *vinfo, GFP_KERNEL);
+ if (!vinfo) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vinfo->idev = input_allocate_device();
+ if (!vinfo->idev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ ret = -ENOMEM;
+ goto exit_vinfo_free;
+ }
+
+ vinfo->idev->name = "ste-ff-vibra";
+ vinfo->idev->dev.parent = pdev->dev.parent;
+ vinfo->idev->open = ste_ff_vibra_open;
+ vinfo->idev->close = ste_ff_vibra_close;
+ INIT_WORK(&vinfo->vibra_work, vibra_play_work);
+ __set_bit(FF_RUMBLE, vinfo->idev->ffbit);
+
+ ret = input_ff_create_memless(vinfo->idev, NULL, vibra_play);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create memless device\n");
+ goto exit_idev_free;
+ }
+
+ ret = input_register_device(vinfo->idev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto exit_destroy_memless;
+ }
+
+ input_set_drvdata(vinfo->idev, vinfo);
+ platform_set_drvdata(pdev, vinfo);
+ return 0;
+
+exit_destroy_memless:
+ input_ff_destroy(vinfo->idev);
+exit_idev_free:
+ input_free_device(vinfo->idev);
+exit_vinfo_free:
+ kfree(vinfo);
+ return ret;
+}
+
+static int __devexit ste_ff_vibra_remove(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo = platform_get_drvdata(pdev);
+
+ /*
+ * Function device_release() will call input_dev_release()
+ * which will free ff and input device. No need to call
+ * input_ff_destroy() and input_free_device() explicitly.
+ */
+ input_unregister_device(vinfo->idev);
+ kfree(vinfo);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ste_ff_vibra_driver = {
+ .driver = {
+ .name = "ste_ff_vibra",
+ .owner = THIS_MODULE,
+ },
+ .probe = ste_ff_vibra_probe,
+ .remove = __devexit_p(ste_ff_vibra_remove)
+};
+
+static int __init ste_ff_vibra_init(void)
+{
+ return platform_driver_register(&ste_ff_vibra_driver);
+}
+module_init(ste_ff_vibra_init);
+
+static void __exit ste_ff_vibra_exit(void)
+{
+ platform_driver_unregister(&ste_ff_vibra_driver);
+}
+module_exit(ste_ff_vibra_exit);
+
+MODULE_AUTHOR("Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>");
+MODULE_DESCRIPTION("STE Force Feedback Vibrator Driver");
+MODULE_LICENSE("GPL v2");
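The driver above registers a standard memless force-feedback device, so it can be exercised from user space through the input FF ioctls. A hedged usage sketch: the event node path and the magnitude value are assumptions, while the direction encoding follows the FF_VIBRA_* definitions above (0x4000 is 90 degrees, which drives only the left vibrator).

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event3", O_RDWR);	/* event node is an assumption */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* let the kernel assign an effect id */
	effect.direction = 0x4000;		/* 90 degrees -> left vibrator only */
	effect.replay.length = 1000;		/* play for one second */
	effect.u.rumble.strong_magnitude = 0xc800; /* >>8 then /2 -> roughly full PWM strength */
	if (ioctl(fd, EVIOCSFF, &effect) < 0)
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	write(fd, &play, sizeof(play));
	sleep(1);
	close(fd);
	return 0;
}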
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 3488ffe1fa0..085b45d5cb0 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -122,6 +122,23 @@ config TOUCHSCREEN_BU21013
To compile this driver as a module, choose M here: the
module will be called bu21013_ts.
+config TOUCHSCREEN_CYTTSP_CORE
+ tristate "Cypress TTSP touchscreen core"
+ help
+ Core support needed by the Cypress TTSP touchscreen interface drivers.
+
+config TOUCHSCREEN_CYTTSP_SPI
+ tristate "Cypress TTSP spi touchscreen"
+ depends on SPI_MASTER && TOUCHSCREEN_CYTTSP_CORE
+ help
+ Say Y here if you have a Cypress TTSP touchscreen
+ connected to your system with an SPI interface.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cyttsp-spi.
+
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f957676035a..3eb61596c0a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -16,7 +16,9 @@ obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
-obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
+obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 902c7214e88..857a21db0eb 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2009
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* License terms:GNU General Public License (GPL) version 2
*/
@@ -12,13 +12,14 @@
#include <linux/input.h>
#include <linux/input/bu21013.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#define PEN_DOWN_INTR 0
-#define MAX_FINGERS 2
#define RESET_DELAY 30
-#define PENUP_TIMEOUT (10)
+#define PENUP_TIMEOUT 2 /* 2msecs */
+#define SCALE_FACTOR 1000
#define DELTA_MIN 16
#define MASK_BITS 0x03
#define SHIFT_8 8
@@ -131,7 +132,7 @@
#define BU21013_NUMBER_OF_X_SENSORS (6)
#define BU21013_NUMBER_OF_Y_SENSORS (11)
-#define DRIVER_TP "bu21013_tp"
+#define DRIVER_TP "bu21013_ts"
/**
* struct bu21013_ts_data - touch panel data structure
@@ -142,6 +143,12 @@
* @in_dev: pointer to the input device structure
* @intr_pin: interrupt pin value
* @regulator: pointer to the Regulator used for touch screen
+ * @enable: indicates whether the touch screen is enabled
+ * @ext_clk_enable: true if running on ext clk
+ * @ext_clk_state: Saved state for suspend/resume of ext clk
+ * @factor_x: x scale factor
+ * @factor_y: y scale factor
+ * @tpclk: pointer to clock structure
*
* Touch panel device data structure
*/
@@ -149,12 +156,226 @@ struct bu21013_ts_data {
struct i2c_client *client;
wait_queue_head_t wait;
bool touch_stopped;
- const struct bu21013_platform_device *chip;
+ struct bu21013_platform_device *chip;
struct input_dev *in_dev;
unsigned int intr_pin;
struct regulator *regulator;
+ bool enable;
+ bool ext_clk_enable;
+ bool ext_clk_state;
+ unsigned int factor_x;
+ unsigned int factor_y;
+ struct clk *tpclk;
};
+static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk);
+
+/**
+ * bu21013_ext_clk() - enable/disable the external clock
+ * @pdata: touch screen data
+ * @enable: enable external clock
+ * @reconfig: reconfigure chip upon external clock off.
+ *
+ * This function is used to enable or disable the external clock and, if
+ * requested, reconfigure the hw.
+ */
+static int bu21013_ext_clk(struct bu21013_ts_data *pdata, bool enable,
+ bool reconfig)
+{
+ int retval = 0;
+
+ if (!pdata->tpclk || pdata->ext_clk_enable == enable)
+ return retval;
+
+ if (enable) {
+ pdata->ext_clk_enable = true;
+ clk_enable(pdata->tpclk);
+ retval = bu21013_init_chip(pdata, true);
+ } else {
+ pdata->ext_clk_enable = false;
+ if (reconfig)
+ retval = bu21013_init_chip(pdata, false);
+ clk_disable(pdata->tpclk);
+ }
+ return retval;
+}
+
+/**
+ * bu21013_enable() - enable the touch driver event
+ * @pdata: touch screen data
+ *
+ * This function is used to enable the driver and returns an integer
+ */
+static int bu21013_enable(struct bu21013_ts_data *pdata)
+{
+ int retval;
+
+ if (pdata->regulator)
+ regulator_enable(pdata->regulator);
+
+ if (pdata->chip->cs_en) {
+ retval = pdata->chip->cs_en(pdata->chip->cs_pin);
+ if (retval < 0) {
+ dev_err(&pdata->client->dev, "enable hw failed\n");
+ return retval;
+ }
+ }
+
+ if (pdata->ext_clk_state)
+ retval = bu21013_ext_clk(pdata, true, true);
+ else
+ retval = bu21013_init_chip(pdata, false);
+
+ if (retval < 0) {
+ dev_err(&pdata->client->dev, "enable hw failed\n");
+ return retval;
+ }
+ pdata->touch_stopped = false;
+ enable_irq(pdata->chip->irq);
+
+ return 0;
+}
+
+/**
+ * bu21013_disable() - disable the touch driver event
+ * @pdata: touch screen data
+ *
+ * This function is used to disable the driver
+ */
+static void bu21013_disable(struct bu21013_ts_data *pdata)
+{
+ pdata->touch_stopped = true;
+
+ pdata->ext_clk_state = pdata->ext_clk_enable;
+ (void) bu21013_ext_clk(pdata, false, false);
+
+ disable_irq(pdata->chip->irq);
+ if (pdata->chip->cs_dis)
+ pdata->chip->cs_dis(pdata->chip->cs_pin);
+ if (pdata->regulator)
+ regulator_disable(pdata->regulator);
+}
+
+/**
+ * bu21013_show_attr_enable() - show the touch screen controller status
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ *
+ * This function is used to show whether the touch screen is enabled or
+ * disabled
+ */
+static ssize_t bu21013_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", pdata->enable);
+}
+
+/**
+ * bu21013_store_attr_enable() - Enable/Disable the touchscreen.
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ * @count: number of parameters
+ *
+ * This function is used to enable or disable the touch screen controller.
+ */
+static ssize_t bu21013_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long val;
+
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->enable != val) {
+ pdata->enable = val ? true : false;
+ if (pdata->enable) {
+ ret = bu21013_enable(pdata);
+ if (ret < 0)
+ return ret;
+ } else
+ bu21013_disable(pdata);
+ }
+ return count;
+}
+
+/**
+ * bu21013_show_attr_extclk() - shows the external clock status
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ *
+ * This function is used to show whether the external clock for the touch
+ * screen is enabled or disabled.
+ */
+static ssize_t bu21013_show_attr_extclk(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", pdata->ext_clk_enable);
+}
+
+/**
+ * bu21013_store_attr_extclk() - Enable/Disable the external clock
+ * for the touch screen controller.
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ * @count: number of parameters
+ *
+ * This function is used to enable or disable the external clock for the touch
+ * screen controller.
+ */
+static ssize_t bu21013_store_attr_extclk(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval = 0;
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->chip->has_ext_clk) {
+ if (pdata->enable)
+ retval = bu21013_ext_clk(pdata, val, true);
+ else
+ pdata->ext_clk_state = val;
+ if (retval < 0)
+ return retval;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ bu21013_show_attr_enable, bu21013_store_attr_enable);
+
+static DEVICE_ATTR(ext_clk, S_IWUSR | S_IRUGO,
+ bu21013_show_attr_extclk, bu21013_store_attr_extclk);
+
+
+static struct attribute *bu21013_attribute[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_ext_clk.attr,
+ NULL,
+};
+
+static struct attribute_group bu21013_attr_group = {
+ .attrs = bu21013_attribute,
+};
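The two attributes above ("enable" and "ext_clk") are presumably registered against the i2c client device in probe (not shown in this hunk), so they appear under the controller's sysfs directory. A hedged user-space sketch for toggling them; the sysfs path is an assumption that depends on the board's i2c bus number and slave address:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Path is an assumption: bus 3 and slave address 0x5c are illustrative only */
#define BU21013_SYSFS "/sys/bus/i2c/devices/3-005c"

static int write_attr(const char *attr, const char *val)
{
	char path[128];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "%s/%s", BU21013_SYSFS, attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, 1) != 1)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	write_attr("ext_clk", "1");	/* run the controller from the external clock */
	write_attr("enable", "0");	/* power the touch controller down */
	return 0;
}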
+
+
/**
* bu21013_read_block_data(): read the touch co-ordinates
* @data: bu21013_ts_data structure pointer
@@ -204,12 +425,14 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data)
if (!has_x_sensors || !has_y_sensors)
return 0;
- for (i = 0; i < MAX_FINGERS; i++) {
+ for (i = 0; i < 2; i++) {
const u8 *p = &buf[4 * i + 3];
unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS);
unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS);
if (x == 0 || y == 0)
continue;
+ x = x * data->factor_x / SCALE_FACTOR;
+ y = y * data->factor_y / SCALE_FACTOR;
pos_x[finger_down_count] = x;
pos_y[finger_down_count] = y;
finger_down_count++;
@@ -217,21 +440,21 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data)
if (finger_down_count) {
if (finger_down_count == 2 &&
- (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
- abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) {
+ (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
+ abs(pos_y[0] - pos_y[1]) < DELTA_MIN))
return 0;
- }
for (i = 0; i < finger_down_count; i++) {
- if (data->chip->x_flip)
- pos_x[i] = data->chip->touch_x_max - pos_x[i];
- if (data->chip->y_flip)
- pos_y[i] = data->chip->touch_y_max - pos_y[i];
-
- input_report_abs(data->in_dev,
- ABS_MT_POSITION_X, pos_x[i]);
- input_report_abs(data->in_dev,
- ABS_MT_POSITION_Y, pos_y[i]);
+ if (data->chip->portrait && data->chip->x_flip)
+ pos_x[i] = data->chip->x_max_res - pos_x[i];
+ if (data->chip->portrait && data->chip->y_flip)
+ pos_y[i] = data->chip->y_max_res - pos_y[i];
+ input_report_abs(data->in_dev, ABS_MT_TOUCH_MAJOR,
+ max(pos_x[i], pos_y[i]));
+ input_report_abs(data->in_dev, ABS_MT_POSITION_X,
+ pos_x[i]);
+ input_report_abs(data->in_dev, ABS_MT_POSITION_Y,
+ pos_y[i]);
input_mt_sync(data->in_dev);
}
} else
@@ -261,24 +484,23 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
dev_err(&i2c->dev, "bu21013_do_touch_report failed\n");
return IRQ_NONE;
}
-
data->intr_pin = data->chip->irq_read_val();
if (data->intr_pin == PEN_DOWN_INTR)
wait_event_timeout(data->wait, data->touch_stopped,
- msecs_to_jiffies(2));
+ msecs_to_jiffies(PENUP_TIMEOUT));
} while (!data->intr_pin && !data->touch_stopped);
-
return IRQ_HANDLED;
}
/**
* bu21013_init_chip() - power on sequence for the bu21013 controller
* @data: device structure pointer
+ * @on_ext_clk: Run on external clock
*
* This function is used to power on
* the bu21013 controller and returns integer.
*/
-static int bu21013_init_chip(struct bu21013_ts_data *data)
+static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk)
{
int retval;
struct i2c_client *i2c = data->client;
@@ -297,28 +519,24 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_SENSOR_0_7 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_8_15_REG,
BU21013_SENSORS_EN_8_15);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_SENSOR_8_15 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_16_23_REG,
BU21013_SENSORS_EN_16_23);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_SENSOR_16_23 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE1_REG,
(BU21013_POS_MODE1_0 | BU21013_POS_MODE1_1));
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_POS_MODE1 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE2_REG,
(BU21013_POS_MODE2_ZERO | BU21013_POS_MODE2_AVG1 |
BU21013_POS_MODE2_AVG2 | BU21013_POS_MODE2_EN_RAW |
@@ -327,8 +545,7 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_POS_MODE2 reg write failed\n");
return retval;
}
-
- if (data->chip->ext_clk)
+ if (on_ext_clk)
retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
(BU21013_CLK_MODE_EXT | BU21013_CLK_MODE_CALIB));
else
@@ -338,21 +555,18 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_CLK_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_IDLE_REG,
(BU21013_IDLET_0 | BU21013_IDLE_INTERMIT_EN));
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_IDLE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_INT_MODE_REG,
BU21013_INT_MODE_LEVEL);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_INT_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_FILTER_REG,
(BU21013_DELTA_0_6 |
BU21013_FILTER_EN));
@@ -367,14 +581,12 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_TH_ON reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG,
BU21013_TH_OFF_4 | BU21013_TH_OFF_3);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_GAIN_REG,
(BU21013_GAIN_0 | BU21013_GAIN_1));
if (retval < 0) {
@@ -388,7 +600,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_OFFSET_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_XY_EDGE_REG,
(BU21013_X_EDGE_0 | BU21013_X_EDGE_2 |
BU21013_Y_EDGE_1 | BU21013_Y_EDGE_3));
@@ -396,7 +607,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_XY_EDGE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_DONE_REG,
BU21013_DONE);
if (retval < 0) {
@@ -404,25 +614,15 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
return retval;
}
- return 0;
+ data->factor_x = (data->chip->x_max_res * SCALE_FACTOR /
+ data->chip->touch_x_max);
+ data->factor_y = (data->chip->y_max_res * SCALE_FACTOR /
+ data->chip->touch_y_max);
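+ /*
+ * Worked example (illustrative, values assumed): if SCALE_FACTOR is 1000,
+ * x_max_res is 480 and touch_x_max is 384, then factor_x = 480 * 1000 / 384
+ * = 1250, so a raw x of 192 is reported as 192 * 1250 / 1000 = 240 by
+ * bu21013_do_touch_report().
+ */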
+ return retval;
}
/**
- * bu21013_free_irq() - frees IRQ registered for touchscreen
- * @bu21013_data: device structure pointer
- *
- * This function signals interrupt thread to stop processing and
- * frees interrupt.
- */
-static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
-{
- bu21013_data->touch_stopped = true;
- wake_up(&bu21013_data->wait);
- free_irq(bu21013_data->chip->irq, bu21013_data);
-}
-
-/**
- * bu21013_probe() - initializes the i2c-client touchscreen driver
+ * bu21013_probe() - initializes the i2c-client touchscreen driver
* @client: i2c client structure pointer
* @id: i2c device id pointer
*
@@ -432,11 +632,11 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
static int __devinit bu21013_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ int retval;
struct bu21013_ts_data *bu21013_data;
struct input_dev *in_dev;
- const struct bu21013_platform_device *pdata =
+ struct bu21013_platform_device *pdata =
client->dev.platform_data;
- int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -446,53 +646,72 @@ static int __devinit bu21013_probe(struct i2c_client *client,
if (!pdata) {
dev_err(&client->dev, "platform data not defined\n");
- return -EINVAL;
+ retval = -EINVAL;
+ return retval;
}
bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL);
- in_dev = input_allocate_device();
- if (!bu21013_data || !in_dev) {
+ if (!bu21013_data) {
dev_err(&client->dev, "device memory alloc failed\n");
- error = -ENOMEM;
- goto err_free_mem;
+ retval = -ENOMEM;
+ return retval;
+ }
+ /* allocate input device */
+ in_dev = input_allocate_device();
+ if (!in_dev) {
+ dev_err(&client->dev, "input device memory alloc failed\n");
+ retval = -ENOMEM;
+ goto err_alloc;
}
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
- bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ bu21013_data->regulator = regulator_get(&client->dev, "avdd");
if (IS_ERR(bu21013_data->regulator)) {
- dev_err(&client->dev, "regulator_get failed\n");
- error = PTR_ERR(bu21013_data->regulator);
- goto err_free_mem;
+ dev_warn(&client->dev, "regulator_get failed\n");
+ bu21013_data->regulator = NULL;
}
-
- error = regulator_enable(bu21013_data->regulator);
- if (error < 0) {
- dev_err(&client->dev, "regulator enable failed\n");
- goto err_put_regulator;
- }
-
- bu21013_data->touch_stopped = false;
- init_waitqueue_head(&bu21013_data->wait);
+ if (bu21013_data->regulator)
+ regulator_enable(bu21013_data->regulator);
/* configure the gpio pins */
if (pdata->cs_en) {
- error = pdata->cs_en(pdata->cs_pin);
- if (error < 0) {
+ retval = pdata->cs_en(pdata->cs_pin);
+ if (retval < 0) {
dev_err(&client->dev, "chip init failed\n");
- goto err_disable_regulator;
+ goto err_init_cs;
+ }
+ }
+
+ if (pdata->has_ext_clk) {
+ bu21013_data->tpclk = clk_get(&client->dev, NULL);
+ if (IS_ERR(bu21013_data->tpclk)) {
+ dev_warn(&client->dev, "get extern clock failed\n");
+ bu21013_data->tpclk = NULL;
+ }
+ }
+
+ if (pdata->enable_ext_clk && bu21013_data->tpclk) {
+ retval = clk_enable(bu21013_data->tpclk);
+ if (retval < 0) {
+ dev_err(&client->dev, "clock enable failed\n");
+ goto err_ext_clk;
}
+ bu21013_data->ext_clk_enable = true;
}
/* configure the touch panel controller */
- error = bu21013_init_chip(bu21013_data);
- if (error) {
+ retval = bu21013_init_chip(bu21013_data, bu21013_data->ext_clk_enable);
+ if (retval < 0) {
dev_err(&client->dev, "error in bu21013 config\n");
- goto err_cs_disable;
+ goto err_init_config;
}
+ init_waitqueue_head(&bu21013_data->wait);
+ bu21013_data->touch_stopped = false;
+
/* register the device to input subsystem */
in_dev->name = DRIVER_TP;
in_dev->id.bustype = BUS_I2C;
@@ -503,44 +722,63 @@ static int __devinit bu21013_probe(struct i2c_client *client,
__set_bit(EV_ABS, in_dev->evbit);
input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
- pdata->touch_x_max, 0, 0);
+ pdata->x_max_res, 0, 0);
input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
- pdata->touch_y_max, 0, 0);
+ pdata->y_max_res, 0, 0);
+ input_set_abs_params(in_dev, ABS_MT_TOUCH_MAJOR, 0,
+ max(pdata->x_max_res, pdata->y_max_res), 0, 0);
input_set_drvdata(in_dev, bu21013_data);
-
- error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
- DRIVER_TP, bu21013_data);
- if (error) {
+ retval = input_register_device(in_dev);
+ if (retval)
+ goto err_input_register;
+
+ retval = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+ DRIVER_TP, bu21013_data);
+ if (retval) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
- goto err_cs_disable;
+ goto err_init_irq;
}
+ bu21013_data->enable = true;
+ i2c_set_clientdata(client, bu21013_data);
- error = input_register_device(in_dev);
- if (error) {
- dev_err(&client->dev, "failed to register input device\n");
- goto err_free_irq;
+ /* sysfs entries for dynamically enabling/disabling input events */
+ retval = sysfs_create_group(&client->dev.kobj, &bu21013_attr_group);
+ if (retval) {
+ dev_err(&client->dev, "failed to create sysfs entries\n");
+ goto err_sysfs_create;
}
- device_init_wakeup(&client->dev, pdata->wakeup);
- i2c_set_clientdata(client, bu21013_data);
-
- return 0;
+ return retval;
-err_free_irq:
- bu21013_free_irq(bu21013_data);
-err_cs_disable:
- pdata->cs_dis(pdata->cs_pin);
-err_disable_regulator:
- regulator_disable(bu21013_data->regulator);
-err_put_regulator:
- regulator_put(bu21013_data->regulator);
-err_free_mem:
- input_free_device(in_dev);
+err_sysfs_create:
+ free_irq(pdata->irq, bu21013_data);
+ i2c_set_clientdata(client, NULL);
+err_init_irq:
+ input_unregister_device(bu21013_data->in_dev);
+err_input_register:
+ wake_up(&bu21013_data->wait);
+err_init_config:
+ if (bu21013_data->tpclk) {
+ if (bu21013_data->ext_clk_enable)
+ clk_disable(bu21013_data->tpclk);
+ clk_put(bu21013_data->tpclk);
+ }
+err_ext_clk:
+ if (pdata->cs_dis)
+ pdata->cs_dis(pdata->cs_pin);
+err_init_cs:
+ if (bu21013_data->regulator) {
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+ }
+ input_free_device(bu21013_data->in_dev);
+err_alloc:
kfree(bu21013_data);
- return error;
+ return retval;
}
+
/**
* bu21013_remove() - removes the i2c-client touchscreen driver
* @client: i2c client structure pointer
@@ -552,19 +790,24 @@ static int __devexit bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
- bu21013_free_irq(bu21013_data);
-
+ bu21013_data->touch_stopped = true;
+ sysfs_remove_group(&client->dev.kobj, &bu21013_attr_group);
+ wake_up(&bu21013_data->wait);
+ free_irq(bu21013_data->chip->irq, bu21013_data);
bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
-
input_unregister_device(bu21013_data->in_dev);
- regulator_disable(bu21013_data->regulator);
- regulator_put(bu21013_data->regulator);
-
+ if (bu21013_data->tpclk) {
+ if (bu21013_data->ext_clk_enable)
+ clk_disable(bu21013_data->tpclk);
+ clk_put(bu21013_data->tpclk);
+ }
+ if (bu21013_data->regulator) {
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+ }
kfree(bu21013_data);
- device_init_wakeup(&client->dev, false);
-
return 0;
}
@@ -579,15 +822,8 @@ static int __devexit bu21013_remove(struct i2c_client *client)
static int bu21013_suspend(struct device *dev)
{
struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
- struct i2c_client *client = bu21013_data->client;
- bu21013_data->touch_stopped = true;
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(bu21013_data->chip->irq);
- else
- disable_irq(bu21013_data->chip->irq);
-
- regulator_disable(bu21013_data->regulator);
+ bu21013_disable(bu21013_data);
return 0;
}
@@ -602,29 +838,8 @@ static int bu21013_suspend(struct device *dev)
static int bu21013_resume(struct device *dev)
{
struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
- struct i2c_client *client = bu21013_data->client;
- int retval;
- retval = regulator_enable(bu21013_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "bu21013 regulator enable failed\n");
- return retval;
- }
-
- retval = bu21013_init_chip(bu21013_data);
- if (retval < 0) {
- dev_err(&client->dev, "bu21013 controller config failed\n");
- return retval;
- }
-
- bu21013_data->touch_stopped = false;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(bu21013_data->chip->irq);
- else
- enable_irq(bu21013_data->chip->irq);
-
- return 0;
+ return bu21013_enable(bu21013_data);
}
static const struct dev_pm_ops bu21013_dev_pm_ops = {
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
new file mode 100755
index 00000000000..2d9ce61bf54
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -0,0 +1,2221 @@
+/* Source for:
+ * Cypress TrueTouch(TM) Standard Product touchscreen driver.
+ * drivers/input/touchscreen/cyttsp_core.c
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/byteorder/generic.h>
+#include <linux/bitops.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/cyttsp.h>
+#include <linux/ctype.h>
+#include <linux/regulator/consumer.h>
+#include "cyttsp_core.h"
+
+#define DBG(x)
+#define DBG2(x)
+#define DBG3(x)
+
+/* rely on kernel input.h to define Multi-Touch capability */
+#ifndef ABS_MT_TRACKING_ID
+/* define only if not defined already by system; */
+/* value based on linux kernel 2.6.30.10 */
+#define ABS_MT_TRACKING_ID (ABS_MT_BLOB_ID + 1)
+#endif /* ABS_MT_TRACKING_ID */
+
+#define TOUCHSCREEN_TIMEOUT (msecs_to_jiffies(28))
+/* Bootloader File 0 offset */
+#define CY_BL_FILE0 0x00
+/* Bootloader command directive */
+#define CY_BL_CMD 0xFF
+/* Bootloader Enter Loader mode */
+#define CY_BL_ENTER 0x38
+/* Bootloader Write a Block */
+#define CY_BL_WRITE_BLK 0x39
+/* Bootloader Terminate Loader mode */
+#define CY_BL_TERMINATE 0x3B
+/* Bootloader Exit and Verify Checksum command */
+#define CY_BL_EXIT 0xA5
+/* Bootloader default keys */
+#define CY_BL_KEY0 0
+#define CY_BL_KEY1 1
+#define CY_BL_KEY2 2
+#define CY_BL_KEY3 3
+#define CY_BL_KEY4 4
+#define CY_BL_KEY5 5
+#define CY_BL_KEY6 6
+#define CY_BL_KEY7 7
+
+#define CY_DIFF(m, n) ((m) != (n))
+#define GET_NUM_TOUCHES(x) ((x) & 0x0F)
+#define GET_TOUCH1_ID(x) (((x) & 0xF0) >> 4)
+#define GET_TOUCH2_ID(x) ((x) & 0x0F)
+#define GET_TOUCH3_ID(x) (((x) & 0xF0) >> 4)
+#define GET_TOUCH4_ID(x) ((x) & 0x0F)
+#define IS_LARGE_AREA(x) (((x) & 0x10) >> 4)
+#define IS_BAD_PKT(x) ((x) & 0x20)
+#define FLIP_DATA_FLAG 0x01
+#define REVERSE_X_FLAG 0x02
+#define REVERSE_Y_FLAG 0x04
+#define FLIP_DATA(flags) ((flags) & FLIP_DATA_FLAG)
+#define REVERSE_X(flags) ((flags) & REVERSE_X_FLAG)
+#define REVERSE_Y(flags) ((flags) & REVERSE_Y_FLAG)
+#define FLIP_XY(x, y) {typeof(x) tmp; tmp = (x); (x) = (y); (y) = tmp; }
+#define INVERT_X(x, xmax) ((xmax) - (x))
+#define INVERT_Y(y, ymax) ((ymax) - (y))
+#define SET_HSTMODE(reg, mode) ((reg) & (mode))
+#define GET_HSTMODE(reg) ((reg & 0x70) >> 4)
+#define GET_BOOTLOADERMODE(reg) ((reg & 0x10) >> 4)
+
+/* Watchdog timeout to check if device is reset and no interrupts running */
+#define CY_WDG_TIMEOUT msecs_to_jiffies(3000)
+#define CY_MODE_ERROR(ps, hst_mode, tt_mode) \
+ ((ps == CY_ACTIVE_STATE && GET_HSTMODE(hst_mode) != CY_OPERATE_MODE) ||\
+ GET_BOOTLOADERMODE(tt_mode))
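+/*
+ * Example (illustrative): with power_state == CY_ACTIVE_STATE and
+ * hst_mode == 0x10, GET_HSTMODE(0x10) yields 1, which differs from
+ * CY_OPERATE_MODE (0), so CY_MODE_ERROR() is true and the callers below
+ * (cyttsp_xy_worker(), cyttsp_check_bl()) re-run cyttsp_power_on().
+ */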
+
+/* maximum number of concurrent ST track IDs */
+#define CY_NUM_ST_TCH_ID 2
+/* maximum number of concurrent MT track IDs */
+#define CY_NUM_MT_TCH_ID 4
+/* maximum number of track IDs */
+#define CY_NUM_TRK_ID 16
+/*
+ * maximum number of concurrent touches
+ * (only CY_NUM_MT_TCH_ID have coord data)
+ */
+#define CY_MAX_TCH 10
+/*
+ * maximum number of touch reports with
+ * current touches=0 before performing Driver reset
+ */
+#define CY_MAX_NTCH 10
+
+#define CY_NTCH 0 /* lift off */
+#define CY_TCH 1 /* touch down */
+#define CY_ST_FNGR1_IDX 0
+#define CY_ST_FNGR2_IDX 1
+#define CY_MT_TCH1_IDX 0
+#define CY_MT_TCH2_IDX 1
+#define CY_MT_TCH3_IDX 2
+#define CY_MT_TCH4_IDX 3
+#define CY_XPOS 0
+#define CY_YPOS 1
+#define CY_IGNR_TCH (-1)
+#define CY_SMALL_TOOL_WIDTH 10
+#define CY_LARGE_TOOL_WIDTH 255
+#define CY_REG_BASE 0x00
+#define CY_REG_GEST_SET 0x1E
+#define CY_REG_SCN_TYP 0x1C
+#define CY_REG_ACT_INTRVL 0x1D
+#define CY_REG_TCH_TMOUT (CY_REG_ACT_INTRVL+1)
+#define CY_REG_LP_INTRVL (CY_REG_TCH_TMOUT+1)
+#define CY_SOFT_RESET (1 << 0)
+#define CY_DEEP_SLEEP (1 << 1)
+#define CY_LOW_POWER (1 << 2)
+#define CY_MAXZ 255
+#define CY_OK 0
+#define CY_INIT 1
+#define CY_DELAY_DFLT 20 /* ms */
+#define CY_DELAY_MAX (500/CY_DELAY_DFLT) /* half second */
+#define CY_DELAY_SYSINFO 20 /* ms */
+#define CY_MODE_CHANGE_DELAY 30 /* ms */
+#define CY_DELAY_BL 300
+#define CY_DELAY_DNLOAD 100 /* ms */
+#define CY_HNDSHK_BIT 0x80
+/* device mode bits */
+#define CY_OPERATE_MODE 0x00
+#define CY_SYSINFO_MODE 0x10
+/* power mode select bits */
+#define CY_SOFT_RESET_MODE 0x01 /* return to Bootloader mode */
+#define CY_DEEP_SLEEP_MODE 0x02
+#define CY_LOW_POWER_MODE 0x04
+#define CY_NUM_KEY 8
+
+/* TrueTouch Standard Product Gen3 (Txx3xx) interface definition */
+struct cyttsp_xydata {
+ u8 hst_mode;
+ u8 tt_mode;
+ u8 tt_stat;
+ u16 x1 __attribute__ ((packed));
+ u16 y1 __attribute__ ((packed));
+ u8 z1;
+ u8 touch12_id;
+ u16 x2 __attribute__ ((packed));
+ u16 y2 __attribute__ ((packed));
+ u8 z2;
+ u8 gest_cnt;
+ u8 gest_id;
+ u16 x3 __attribute__ ((packed));
+ u16 y3 __attribute__ ((packed));
+ u8 z3;
+ u8 touch34_id;
+ u16 x4 __attribute__ ((packed));
+ u16 y4 __attribute__ ((packed));
+ u8 z4;
+ u8 tt_undef[3];
+ u8 gest_set;
+ u8 tt_reserved;
+};
+
+struct cyttsp_xydata_gen2 {
+ u8 hst_mode;
+ u8 tt_mode;
+ u8 tt_stat;
+ u16 x1 __attribute__ ((packed));
+ u16 y1 __attribute__ ((packed));
+ u8 z1;
+ u8 evnt_idx;
+ u16 x2 __attribute__ ((packed));
+ u16 y2 __attribute__ ((packed));
+ u8 tt_undef1;
+ u8 gest_cnt;
+ u8 gest_id;
+ u8 tt_undef[14];
+ u8 gest_set;
+ u8 tt_reserved;
+};
+
+/* TrueTouch Standard Product Gen2 (Txx2xx) interface definition */
+enum cyttsp_gen2_std {
+ CY_GEN2_NOTOUCH = 0x03, /* Both touches removed */
+ CY_GEN2_GHOST = 0x02, /* ghost */
+ CY_GEN2_2TOUCH = 0x03, /* 2 touch; no ghost */
+ CY_GEN2_1TOUCH = 0x01, /* 1 touch only */
+ CY_GEN2_TOUCH2 = 0x01, /* 1st touch removed; 2nd touch remains */
+};
+
+/* TTSP System Information interface definition */
+struct cyttsp_sysinfo_data {
+ u8 hst_mode;
+ u8 mfg_stat;
+ u8 mfg_cmd;
+ u8 cid[3];
+ u8 tt_undef1;
+ u8 uid[8];
+ u8 bl_verh;
+ u8 bl_verl;
+ u8 tts_verh;
+ u8 tts_verl;
+ u8 app_idh;
+ u8 app_idl;
+ u8 app_verh;
+ u8 app_verl;
+ u8 tt_undef[5];
+ u8 scn_typ; /* Gen3 only: scan type [0:Mutual, 1:Self] */
+ u8 act_intrvl;
+ u8 tch_tmout;
+ u8 lp_intrvl;
+};
+
+/* TTSP Bootloader Register Map interface definition */
+#define CY_BL_CHKSUM_OK 0x01
+struct cyttsp_bootloader_data {
+ u8 bl_file;
+ u8 bl_status;
+ u8 bl_error;
+ u8 blver_hi;
+ u8 blver_lo;
+ u8 bld_blver_hi;
+ u8 bld_blver_lo;
+ u8 ttspver_hi;
+ u8 ttspver_lo;
+ u8 appid_hi;
+ u8 appid_lo;
+ u8 appver_hi;
+ u8 appver_lo;
+ u8 cid_0;
+ u8 cid_1;
+ u8 cid_2;
+};
+
+#define cyttsp_wake_data cyttsp_xydata
+
+struct cyttsp {
+ struct device *pdev;
+ int irq;
+ struct input_dev *input;
+ struct work_struct work;
+ struct timer_list timer;
+ struct mutex mutex;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ char phys[32];
+ struct cyttsp_platform_data *platform_data;
+ struct cyttsp_bootloader_data bl_data;
+ struct cyttsp_sysinfo_data sysinfo_data;
+ u8 num_prv_st_tch;
+ u16 act_trk[CY_NUM_TRK_ID];
+ u16 prv_mt_tch[CY_NUM_MT_TCH_ID];
+ u16 prv_st_tch[CY_NUM_ST_TCH_ID];
+ u16 prv_mt_pos[CY_NUM_TRK_ID][2];
+ struct cyttsp_bus_ops *bus_ops;
+ struct regulator *regulator;
+ unsigned fw_loader_mode:1;
+ unsigned suspended:1;
+ struct timer_list to_timer;
+ bool to_timeout;
+ struct completion int_running;
+ bool bl_ready;
+ u8 reg_id;
+ u8 ntch_count;
+};
+
+struct cyttsp_track_data {
+ u8 prv_tch;
+ u8 cur_tch;
+ u16 tmp_trk[CY_NUM_MT_TCH_ID];
+ u16 snd_trk[CY_NUM_MT_TCH_ID];
+ u16 cur_trk[CY_NUM_TRK_ID];
+ u16 cur_st_tch[CY_NUM_ST_TCH_ID];
+ u16 cur_mt_tch[CY_NUM_MT_TCH_ID];
+ /* if NOT CY_USE_TRACKING_ID then only */
+ /* uses CY_NUM_MT_TCH_ID positions */
+ u16 cur_mt_pos[CY_NUM_TRK_ID][2];
+ /* if NOT CY_USE_TRACKING_ID then only */
+ /* uses CY_NUM_MT_TCH_ID positions */
+ u8 cur_mt_z[CY_NUM_TRK_ID];
+ u8 tool_width;
+ u16 st_x1;
+ u16 st_y1;
+ u8 st_z1;
+ u16 st_x2;
+ u16 st_y2;
+ u8 st_z2;
+};
+
+static const u8 bl_cmd[] = {
+ CY_BL_FILE0, CY_BL_CMD, CY_BL_EXIT,
+ CY_BL_KEY0, CY_BL_KEY1, CY_BL_KEY2,
+ CY_BL_KEY3, CY_BL_KEY4, CY_BL_KEY5,
+ CY_BL_KEY6, CY_BL_KEY7
+};
+
+#define LOCK(m) do { \
+ DBG(printk(KERN_INFO "%s: lock\n", __func__);) \
+ mutex_lock(&(m)); \
+} while (0);
+
+#define UNLOCK(m) do { \
+ DBG(printk(KERN_INFO "%s: unlock\n", __func__);) \
+ mutex_unlock(&(m)); \
+} while (0);
+
+DBG(
+static void print_data_block(const char *func, u8 command,
+ u8 length, void *data)
+{
+ char buf[1024];
+ unsigned buf_len = sizeof(buf);
+ char *p = buf;
+ int i;
+ int l;
+
+ l = snprintf(p, buf_len, "cmd 0x%x: ", command);
+ buf_len -= l;
+ p += l;
+ for (i = 0; i < length && buf_len; i++, p += l, buf_len -= l)
+ l = snprintf(p, buf_len, "%02x ", *((char *)data + i));
+ printk(KERN_DEBUG "%s: %s\n", func, buf);
+})
+
+static int cyttsp_soft_reset(struct cyttsp *ts);
+static int cyttsp_set_operational_mode(struct cyttsp *ts);
+static int cyttsp_exit_bl_mode(struct cyttsp *ts);
+static int cyttsp_power_on(struct cyttsp *ts);
+static void cyttsp_init_tch(struct cyttsp *ts)
+{
+ /* init the touch structures */
+ ts->num_prv_st_tch = CY_NTCH;
+ memset(ts->act_trk, CY_NTCH, sizeof(ts->act_trk));
+ memset(ts->prv_mt_pos, CY_NTCH, sizeof(ts->prv_mt_pos));
+ memset(ts->prv_mt_tch, CY_IGNR_TCH, sizeof(ts->prv_mt_tch));
+ memset(ts->prv_st_tch, CY_IGNR_TCH, sizeof(ts->prv_st_tch));
+ ts->ntch_count = 0;
+}
+
+static u8 ttsp_convert_gen2(u8 cur_tch, struct cyttsp_xydata *pxy_data)
+{
+ struct cyttsp_xydata_gen2 *pxy_data_gen2;
+ pxy_data_gen2 = (struct cyttsp_xydata_gen2 *)(pxy_data);
+
+ if (pxy_data_gen2->evnt_idx == CY_GEN2_NOTOUCH) {
+ cur_tch = 0;
+ } else if (cur_tch == CY_GEN2_GHOST) {
+ cur_tch = 0;
+ } else if (cur_tch == CY_GEN2_2TOUCH) {
+ /* stuff artificial track ID1 and ID2 */
+ pxy_data->touch12_id = 0x12;
+ pxy_data->z1 = CY_MAXZ;
+ pxy_data->z2 = CY_MAXZ;
+ cur_tch--; /* 2 touches */
+ } else if (cur_tch == CY_GEN2_1TOUCH) {
+ /* stuff artificial track ID1 and ID2 */
+ pxy_data->touch12_id = 0x12;
+ pxy_data->z1 = CY_MAXZ;
+ pxy_data->z2 = CY_NTCH;
+ if (pxy_data_gen2->evnt_idx == CY_GEN2_TOUCH2) {
+ /* push touch 2 data into touch1
+ * (first finger up; second finger down) */
+ /* stuff artificial track ID1 for touch2 info */
+ pxy_data->touch12_id = 0x20;
+ /* stuff touch 1 with touch 2 coordinate data */
+ pxy_data->x1 = pxy_data->x2;
+ pxy_data->y1 = pxy_data->y2;
+ }
+ } else {
+ cur_tch = 0;
+ }
+ return cur_tch;
+}
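+
+/*
+ * Example (illustrative): a Gen2 packet with cur_tch == CY_GEN2_1TOUCH and
+ * evnt_idx == CY_GEN2_TOUCH2 means the first finger lifted while the second
+ * remains; ttsp_convert_gen2() then copies x2/y2 into x1/y1 and stuffs the
+ * artificial track id 0x20 into touch12_id so the Gen3 path reports the
+ * remaining finger as a single touch.
+ */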
+
+static int ttsp_read_block_data(struct cyttsp *ts, u8 command,
+ u8 length, void *buf)
+{
+ int rc;
+ int tries;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ if (!buf || !length) {
+ printk(KERN_ERR "%s: Error, buf:%s len:%u\n",
+ __func__, !buf ? "NULL" : "OK", length);
+ return -EIO;
+ }
+
+ for (tries = 0, rc = -1; tries < CY_NUM_RETRY && (rc < 0); tries++) {
+ rc = ts->bus_ops->read(ts->bus_ops, command, length, buf);
+ if (rc)
+ msleep(CY_DELAY_DFLT);
+ }
+
+ if (rc < 0)
+ printk(KERN_ERR "%s: error %d\n", __func__, rc);
+ DBG(print_data_block(__func__, command, length, buf);)
+ return rc;
+}
+
+static int ttsp_write_block_data(struct cyttsp *ts, u8 command,
+ u8 length, void *buf)
+{
+ int rc;
+ int tries;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ if (!buf || !length) {
+ printk(KERN_ERR "%s: Error, buf:%s len:%u\n",
+ __func__, !buf ? "NULL" : "OK", length);
+ return -EIO;
+ }
+
+ for (tries = 0, rc = -1; tries < CY_NUM_RETRY && (rc < 0); tries++) {
+ rc = ts->bus_ops->write(ts->bus_ops, command, length, buf);
+ if (rc)
+ msleep(CY_DELAY_DFLT);
+ }
+
+ if (rc < 0)
+ printk(KERN_ERR "%s: error %d\n", __func__, rc);
+ DBG(print_data_block(__func__, command, length, buf);)
+ return rc;
+}
+
+static int ttsp_tch_ext(struct cyttsp *ts, void *buf)
+{
+ int rc;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ if (!buf) {
+ printk(KERN_ERR "%s: Error, buf:%s\n",
+ __func__, !buf ? "NULL" : "OK");
+ return -EIO;
+ }
+ rc = ts->bus_ops->ext(ts->bus_ops, buf);
+ if (rc < 0)
+ printk(KERN_ERR "%s: error %d\n", __func__, rc);
+ return rc;
+}
+
+/* ************************************************************************
+ * The cyttsp_xy_worker function reads the XY coordinates and sends them to
+ * the input layer. It is called from the touch interrupt handler.
+ * *************************************************************************/
+static int cyttsp_inlist(u16 prev_track[], u8 cur_trk_id, u8 *prev_loc,
+ u8 num_touches)
+{
+ u8 id = 0;
+
+ DBG(printk(KERN_INFO"%s: IN p[%d]=%d c=%d n=%d loc=%d\n",
+ __func__, id, prev_track[id], cur_trk_id,
+ num_touches, *prev_loc);)
+
+ for (*prev_loc = CY_IGNR_TCH; id < num_touches; id++) {
+ DBG(printk(KERN_INFO"%s: p[%d]=%d c=%d n=%d loc=%d\n",
+ __func__, id, prev_track[id], cur_trk_id,
+ num_touches, *prev_loc);)
+ if (prev_track[id] == cur_trk_id) {
+ *prev_loc = id;
+ break;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: OUT p[%d]=%d c=%d n=%d loc=%d\n", __func__,
+ id, prev_track[id], cur_trk_id, num_touches, *prev_loc);)
+
+ return *prev_loc < CY_NUM_TRK_ID;
+}
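+
+/*
+ * Note (illustrative): cyttsp_inlist() returns nonzero when cur_trk_id is
+ * found in prev_track[] and stores its index in *prev_loc; otherwise
+ * *prev_loc is left at CY_IGNR_TCH, which fails the "< CY_NUM_TRK_ID" test.
+ */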
+
+static int cyttsp_next_avail_inlist(u16 cur_trk[], u8 *new_loc,
+ u8 num_touches)
+{
+ u8 id = 0;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ for (*new_loc = CY_IGNR_TCH; id < num_touches; id++) {
+ if (cur_trk[id] > CY_NUM_TRK_ID) {
+ *new_loc = id;
+ break;
+ }
+ }
+ return *new_loc < CY_NUM_TRK_ID;
+}
+
+static void handle_single_touch(struct cyttsp_xydata *xy,
+ struct cyttsp_track_data *t, struct cyttsp *ts)
+{
+ u8 id;
+ u8 use_trk_id = ts->platform_data->use_trk_id;
+
+ DBG(printk(KERN_INFO"%s: ST STEP 0 - ST1 ID=%d ST2 ID=%d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR1_IDX],
+ t->cur_st_tch[CY_ST_FNGR2_IDX]);)
+
+ if (t->cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) {
+ /* reassign finger 1 and 2 positions to new tracks */
+ if (t->cur_tch > 0) {
+ /* reassign st finger1 */
+ if (use_trk_id) {
+ id = CY_MT_TCH1_IDX;
+ t->cur_st_tch[CY_ST_FNGR1_IDX] =
+ t->cur_mt_tch[id];
+ } else {
+ id = GET_TOUCH1_ID(xy->touch12_id);
+ t->cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ }
+ t->st_x1 = t->cur_mt_pos[id][CY_XPOS];
+ t->st_y1 = t->cur_mt_pos[id][CY_YPOS];
+ t->st_z1 = t->cur_mt_z[id];
+
+ DBG(printk(KERN_INFO"%s: ST STEP 1 - ST1 ID=%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR1_IDX]);)
+
+ if ((t->cur_tch > 1) &&
+ (t->cur_st_tch[CY_ST_FNGR2_IDX] >
+ CY_NUM_TRK_ID)) {
+ /* reassign st finger2 */
+ if (use_trk_id) {
+ id = CY_MT_TCH2_IDX;
+ t->cur_st_tch[CY_ST_FNGR2_IDX] =
+ t->cur_mt_tch[id];
+ } else {
+ id = GET_TOUCH2_ID(xy->touch12_id);
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ t->st_x2 = t->cur_mt_pos[id][CY_XPOS];
+ t->st_y2 = t->cur_mt_pos[id][CY_YPOS];
+ t->st_z2 = t->cur_mt_z[id];
+
+ DBG(
+ printk(KERN_INFO"%s: ST STEP 2 - ST2 ID=%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR2_IDX]);)
+ }
+ }
+ } else if (t->cur_st_tch[CY_ST_FNGR2_IDX] > CY_NUM_TRK_ID) {
+ if (t->cur_tch > 1) {
+ /* reassign st finger2 */
+ if (use_trk_id) {
+ /* reassign st finger2 */
+ id = CY_MT_TCH2_IDX;
+ t->cur_st_tch[CY_ST_FNGR2_IDX] =
+ t->cur_mt_tch[id];
+ } else {
+ /* reassign st finger2 */
+ id = GET_TOUCH2_ID(xy->touch12_id);
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ t->st_x2 = t->cur_mt_pos[id][CY_XPOS];
+ t->st_y2 = t->cur_mt_pos[id][CY_YPOS];
+ t->st_z2 = t->cur_mt_z[id];
+
+ DBG(printk(KERN_INFO"%s: ST STEP 3 - ST2 ID=%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR2_IDX]);)
+ }
+ }
+ /* if the 1st touch is missing and there is a 2nd touch,
+ * then set the 1st touch to 2nd touch and terminate 2nd touch
+ */
+ if ((t->cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) &&
+ (t->cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID)) {
+ t->st_x1 = t->st_x2;
+ t->st_y1 = t->st_y2;
+ t->st_z1 = t->st_z2;
+ t->cur_st_tch[CY_ST_FNGR1_IDX] = t->cur_st_tch[CY_ST_FNGR2_IDX];
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = CY_IGNR_TCH;
+ }
+ /* if the 2nd touch ends up equal to the 1st touch,
+ * then just report a single touch */
+ if (t->cur_st_tch[CY_ST_FNGR1_IDX] == t->cur_st_tch[CY_ST_FNGR2_IDX])
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = CY_IGNR_TCH;
+
+ /* set Single Touch current event signals */
+ if (t->cur_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ input_report_abs(ts->input, ABS_X, t->st_x1);
+ input_report_abs(ts->input, ABS_Y, t->st_y1);
+ input_report_abs(ts->input, ABS_PRESSURE, t->st_z1);
+ input_report_key(ts->input, BTN_TOUCH, CY_TCH);
+ input_report_abs(ts->input, ABS_TOOL_WIDTH, t->tool_width);
+
+ DBG2(printk(KERN_INFO"%s:ST->F1:%3d X:%3d Y:%3d Z:%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR1_IDX],
+ t->st_x1, t->st_y1, t->st_z1);)
+
+ if (t->cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID) {
+ input_report_key(ts->input, BTN_2, CY_TCH);
+ input_report_abs(ts->input, ABS_HAT0X, t->st_x2);
+ input_report_abs(ts->input, ABS_HAT0Y, t->st_y2);
+
+ DBG2(printk(KERN_INFO
+ "%s:ST->F2:%3d X:%3d Y:%3d Z:%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR2_IDX],
+ t->st_x2, t->st_y2, t->st_z2);)
+ } else {
+ input_report_key(ts->input, BTN_2, CY_NTCH);
+ }
+ } else {
+ input_report_abs(ts->input, ABS_PRESSURE, CY_NTCH);
+ input_report_key(ts->input, BTN_TOUCH, CY_NTCH);
+ input_report_key(ts->input, BTN_2, CY_NTCH);
+ }
+ /* update platform data for the current single touch info */
+ ts->prv_st_tch[CY_ST_FNGR1_IDX] = t->cur_st_tch[CY_ST_FNGR1_IDX];
+ ts->prv_st_tch[CY_ST_FNGR2_IDX] = t->cur_st_tch[CY_ST_FNGR2_IDX];
+
+}
+
+static void handle_multi_touch(struct cyttsp_track_data *t, struct cyttsp *ts)
+{
+
+ u8 id;
+ u8 i, loc;
+ void (*mt_sync_func)(struct input_dev *) = ts->platform_data->mt_sync;
+
+ if (!ts->platform_data->use_trk_id)
+ goto no_track_id;
+
+ /* terminate any previous touch where the track
+ * is missing from the current event */
+ for (id = 0; id < CY_NUM_TRK_ID; id++) {
+ if ((ts->act_trk[id] == CY_NTCH) || (t->cur_trk[id] != CY_NTCH))
+ continue;
+
+ input_report_abs(ts->input, ABS_MT_TRACKING_ID, id);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, CY_NTCH);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ ts->prv_mt_pos[id][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ ts->prv_mt_pos[id][CY_YPOS]);
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+ ts->act_trk[id] = CY_NTCH;
+ ts->prv_mt_pos[id][CY_XPOS] = 0;
+ ts->prv_mt_pos[id][CY_YPOS] = 0;
+ }
+ /* set Multi-Touch current event signals */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->cur_mt_tch[id] >= CY_NUM_TRK_ID)
+ continue;
+
+ input_report_abs(ts->input, ABS_MT_TRACKING_ID,
+ t->cur_mt_tch[id]);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ t->cur_mt_z[id]);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,
+ t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ t->cur_mt_pos[id][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ t->cur_mt_pos[id][CY_YPOS]);
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+
+ ts->act_trk[id] = CY_TCH;
+ ts->prv_mt_pos[id][CY_XPOS] = t->cur_mt_pos[id][CY_XPOS];
+ ts->prv_mt_pos[id][CY_YPOS] = t->cur_mt_pos[id][CY_YPOS];
+ }
+ return;
+no_track_id:
+
+ /* set temporary track array elements to voids */
+ memset(t->tmp_trk, CY_IGNR_TCH, sizeof(t->tmp_trk));
+ memset(t->snd_trk, CY_IGNR_TCH, sizeof(t->snd_trk));
+
+ /* get what is currently active */
+ for (i = id = 0; id < CY_NUM_TRK_ID && i < CY_NUM_MT_TCH_ID; id++) {
+ if (t->cur_trk[id] == CY_TCH) {
+ /* only incr counter if track found */
+ t->tmp_trk[i] = id;
+ i++;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: T1: t0=%d, t1=%d, t2=%d, t3=%d\n", __func__,
+ t->tmp_trk[0], t->tmp_trk[1],
+ t->tmp_trk[2], t->tmp_trk[3]);)
+ DBG(printk(KERN_INFO"%s: T1: p0=%d, p1=%d, p2=%d, p3=%d\n", __func__,
+ ts->prv_mt_tch[0], ts->prv_mt_tch[1],
+ ts->prv_mt_tch[2], ts->prv_mt_tch[3]);)
+
+ /* pack in still active previous touches */
+ for (id = t->prv_tch = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->tmp_trk[id] >= CY_NUM_TRK_ID)
+ continue;
+
+ if (cyttsp_inlist(ts->prv_mt_tch, t->tmp_trk[id], &loc,
+ CY_NUM_MT_TCH_ID)) {
+ loc &= CY_NUM_MT_TCH_ID - 1;
+ t->snd_trk[loc] = t->tmp_trk[id];
+ t->prv_tch++;
+ DBG(printk(KERN_INFO"%s: in list s[%d]=%d "
+ "t[%d]=%d, loc=%d p=%d\n", __func__,
+ loc, t->snd_trk[loc],
+ id, t->tmp_trk[id],
+ loc, t->prv_tch);)
+ } else {
+ DBG(printk(KERN_INFO"%s: is not in list s[%d]=%d"
+ " t[%d]=%d loc=%d\n", __func__,
+ id, t->snd_trk[id],
+ id, t->tmp_trk[id],
+ loc);)
+ }
+ }
+ DBG(printk(KERN_INFO"%s: S1: s0=%d, s1=%d, s2=%d, s3=%d p=%d\n",
+ __func__,
+ t->snd_trk[0], t->snd_trk[1], t->snd_trk[2],
+ t->snd_trk[3], t->prv_tch);)
+
+ /* pack in new touches */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->tmp_trk[id] >= CY_NUM_TRK_ID)
+ continue;
+
+ if (!cyttsp_inlist(t->snd_trk, t->tmp_trk[id], &loc,
+ CY_NUM_MT_TCH_ID)) {
+
+ DBG(
+ printk(KERN_INFO"%s: not in list t[%d]=%d, loc=%d\n",
+ __func__,
+ id, t->tmp_trk[id], loc);)
+
+ if (cyttsp_next_avail_inlist(t->snd_trk, &loc,
+ CY_NUM_MT_TCH_ID)) {
+ loc &= CY_NUM_MT_TCH_ID - 1;
+ t->snd_trk[loc] = t->tmp_trk[id];
+ DBG(printk(KERN_INFO "%s: put in list s[%d]=%d"
+ " t[%d]=%d\n", __func__,
+ loc,
+ t->snd_trk[loc], id, t->tmp_trk[id]);
+ )
+ }
+ } else {
+ DBG(printk(KERN_INFO"%s: is in list s[%d]=%d "
+ "t[%d]=%d loc=%d\n", __func__,
+ id, t->snd_trk[id], id, t->tmp_trk[id], loc);)
+ }
+ }
+ DBG(printk(KERN_INFO"%s: S2: s0=%d, s1=%d, s2=%d, s3=%d\n", __func__,
+ t->snd_trk[0], t->snd_trk[1],
+ t->snd_trk[2], t->snd_trk[3]);)
+
+ /* sync motion event signals for each current touch */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ /* z will either be 0 (NOTOUCH) or
+ * some pressure (TOUCH)
+ */
+ DBG(printk(KERN_INFO "%s: MT0 prev[%d]=%d "
+ "temp[%d]=%d send[%d]=%d\n",
+ __func__, id, ts->prv_mt_tch[id],
+ id, t->tmp_trk[id], id, t->snd_trk[id]);)
+
+ if (ts->platform_data->invert) {
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS] =
+ INVERT_X(t->cur_mt_pos[t->snd_trk[id]]
+ [CY_XPOS], ts->platform_data->maxx);
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS] =
+ INVERT_X(t->cur_mt_pos[t->snd_trk[id]]
+ [CY_YPOS], ts->platform_data->maxy);
+ }
+ if (t->snd_trk[id] < CY_NUM_TRK_ID) {
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ t->cur_mt_z[t->snd_trk[id]]);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,
+ t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS]);
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+
+ DBG2(printk(KERN_INFO"%s: MT1 -> TID:"
+ "%3d X:%3d Y:%3d Z:%3d\n", __func__,
+ t->snd_trk[id],
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS],
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS],
+ t->cur_mt_z[t->snd_trk[id]]);)
+
+ } else if (ts->prv_mt_tch[id] < CY_NUM_TRK_ID) {
+ /* void out this touch */
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ CY_NTCH);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,
+ t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS]);
+
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+
+ DBG2(printk(KERN_INFO"%s: "
+ "MT2->TID:%2d X:%3d Y:%3d Z:%3d liftoff-sent\n",
+ __func__, ts->prv_mt_tch[id],
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS],
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS],
+ CY_NTCH);)
+ } else {
+ /* do not stuff any signals for touches that are
+ * void both previously and currently
+ */
+ DBG(printk(KERN_INFO"%s: "
+ "MT3->send[%d]=%d - No touch - NOT sent\n",
+ __func__, id, t->snd_trk[id]);)
+ }
+ }
+
+ /* save current posted tracks to
+ * previous track memory */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ ts->prv_mt_tch[id] = t->snd_trk[id];
+ if (t->snd_trk[id] < CY_NUM_TRK_ID) {
+ ts->prv_mt_pos[t->snd_trk[id]][CY_XPOS] =
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS];
+ ts->prv_mt_pos[t->snd_trk[id]][CY_YPOS] =
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS];
+ DBG(printk(KERN_INFO"%s: "
+ "MT4->TID:%2d X:%3d Y:%3d Z:%3d save for prv\n",
+ __func__, t->snd_trk[id],
+ ts->prv_mt_pos[t->snd_trk[id]][CY_XPOS],
+ ts->prv_mt_pos[t->snd_trk[id]][CY_YPOS],
+ CY_NTCH);)
+ }
+ }
+ memset(ts->act_trk, CY_NTCH, sizeof(ts->act_trk));
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->snd_trk[id] < CY_NUM_TRK_ID)
+ ts->act_trk[t->snd_trk[id]] = CY_TCH;
+ }
+}
+
+static void cyttsp_xy_worker(struct cyttsp *ts)
+{
+ struct cyttsp_xydata xy_data;
+ u8 id, tilt, rev_x, rev_y;
+ struct cyttsp_track_data trc;
+ s32 retval;
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+ /* get event data from CYTTSP device */
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(xy_data), &xy_data);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, fail to read device on host interface bus\n",
+ __func__);
+ goto exit_xy_worker;
+ }
+
+ if (CY_MODE_ERROR(ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode)) {
+ /* TTSP device has switched to non-operational mode */
+ printk(KERN_ERR "%s: Error, bad mode ps=%d hm=%02X tm=%02X\n",
+ __func__, ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode);
+ retval = cyttsp_power_on(ts);
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n", __func__);
+ goto exit_xy_worker;
+ }
+
+ /* touch extension handling */
+ retval = ttsp_tch_ext(ts, &xy_data);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, touch extension handling\n",
+ __func__);
+ goto exit_xy_worker;
+ } else if (retval > 0) {
+ DBG(printk(KERN_INFO "%s: Touch extension handled\n",
+ __func__);)
+ goto exit_xy_worker;
+ }
+
+ /* provide flow control handshake */
+ if (ts->irq) {
+ if (ts->platform_data->use_hndshk) {
+ u8 cmd;
+ cmd = xy_data.hst_mode & CY_HNDSHK_BIT ?
+ xy_data.hst_mode & ~CY_HNDSHK_BIT :
+ xy_data.hst_mode | CY_HNDSHK_BIT;
+ retval = ttsp_write_block_data(ts, CY_REG_BASE,
+ sizeof(cmd), (u8 *)&cmd);
+ }
+ }
+ trc.cur_tch = GET_NUM_TOUCHES(xy_data.tt_stat);
+
+ if (IS_LARGE_AREA(xy_data.tt_stat) == 1) {
+ /* terminate all active tracks */
+ trc.cur_tch = CY_NTCH;
+ DBG(printk(KERN_INFO "%s: Large area detected\n",
+ __func__);)
+ } else if (trc.cur_tch > CY_MAX_TCH) {
+ /* terminate all active tracks */
+ trc.cur_tch = CY_NTCH;
+ DBG(printk(KERN_INFO "%s: Num touch error detected\n",
+ __func__);)
+ } else if (IS_BAD_PKT(xy_data.tt_mode)) {
+ /* terminate all active tracks */
+ trc.cur_tch = CY_NTCH;
+ DBG(printk(KERN_INFO "%s: Invalid buffer detected\n",
+ __func__);)
+ }
+
+ /* set tool size */
+ trc.tool_width = CY_SMALL_TOOL_WIDTH;
+
+ if (ts->platform_data->gen == CY_GEN2) {
+ /* translate Gen2 interface data into comparable Gen3 data */
+ trc.cur_tch = ttsp_convert_gen2(trc.cur_tch, &xy_data);
+ }
+
+ /* clear current active track ID array and count previous touches */
+ for (id = 0, trc.prv_tch = CY_NTCH; id < CY_NUM_TRK_ID; id++) {
+ trc.cur_trk[id] = CY_NTCH;
+ trc.prv_tch += ts->act_trk[id];
+ }
+
+ /*
+ * send no events if there were no
+ * previous touches and no new touches
+ */
+ if ((trc.prv_tch == CY_NTCH) && ((trc.cur_tch == CY_NTCH) ||
+ (trc.cur_tch > CY_MAX_TCH))) {
+ if (++ts->ntch_count > CY_MAX_NTCH) {
+ /* TTSP device has a stuck operational mode */
+ printk(KERN_ERR "%s: Error, stuck no-touch ct=%d\n",
+ __func__, trc.cur_tch);
+ retval = cyttsp_power_on(ts);
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n",
+ __func__);
+ goto exit_xy_worker;
+ }
+ } else
+ ts->ntch_count = 0;
+
+ DBG(printk(KERN_INFO "%s: prev=%d curr=%d\n", __func__,
+ trc.prv_tch, trc.cur_tch);)
+
+ /* clear current single-touch array */
+ memset(trc.cur_st_tch, CY_IGNR_TCH, sizeof(trc.cur_st_tch));
+
+ /* clear single touch positions */
+ trc.st_x1 = trc.st_y1 = trc.st_z1 =
+ trc.st_x2 = trc.st_y2 = trc.st_z2 = CY_NTCH;
+
+ /* clear current multi-touch arrays */
+ memset(trc.cur_mt_tch, CY_IGNR_TCH, sizeof(trc.cur_mt_tch));
+ memset(trc.cur_mt_pos, CY_NTCH, sizeof(trc.cur_mt_pos));
+ memset(trc.cur_mt_z, CY_NTCH, sizeof(trc.cur_mt_z));
+
+ DBG(
+ if (trc.cur_tch) {
+ unsigned i;
+ u8 *pdata = (u8 *)&xy_data;
+
+ printk(KERN_INFO "%s: TTSP data_pack: ", __func__);
+ for (i = 0; i < sizeof(struct cyttsp_xydata); i++)
+ printk(KERN_INFO "[%d]=0x%x ", i, pdata[i]);
+ printk(KERN_INFO "\n");
+ })
+
+ /* Determine if display is tilted */
+ tilt = !!FLIP_DATA(ts->platform_data->flags);
+ /* Check for switch in origin */
+ rev_x = !!REVERSE_X(ts->platform_data->flags);
+ rev_y = !!REVERSE_Y(ts->platform_data->flags);
+
+ /* process the touches */
+ switch (trc.cur_tch) {
+ case 4:
+ xy_data.x4 = be16_to_cpu(xy_data.x4);
+ xy_data.y4 = be16_to_cpu(xy_data.y4);
+ if (tilt)
+ FLIP_XY(xy_data.x4, xy_data.y4);
+
+ if (rev_x)
+ xy_data.x4 = INVERT_X(xy_data.x4,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y4 = INVERT_X(xy_data.y4,
+ ts->platform_data->maxy);
+
+ id = GET_TOUCH4_ID(xy_data.touch34_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH4_IDX][CY_XPOS] = xy_data.x4;
+ trc.cur_mt_pos[CY_MT_TCH4_IDX][CY_YPOS] = xy_data.y4;
+ trc.cur_mt_z[CY_MT_TCH4_IDX] = xy_data.z4;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x4;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y4;
+ trc.cur_mt_z[id] = xy_data.z4;
+ }
+ trc.cur_mt_tch[CY_MT_TCH4_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x4;
+ trc.st_y1 = xy_data.y4;
+ trc.st_z1 = xy_data.z4;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x4;
+ trc.st_y2 = xy_data.y4;
+ trc.st_z2 = xy_data.z4;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: 4th XYZ:% 3d,% 3d,% 3d ID:% 2d\n\n",
+ __func__, xy_data.x4, xy_data.y4, xy_data.z4,
+ (xy_data.touch34_id & 0x0F));)
+
+ /* do not break */
+ case 3:
+ xy_data.x3 = be16_to_cpu(xy_data.x3);
+ xy_data.y3 = be16_to_cpu(xy_data.y3);
+ if (tilt)
+ FLIP_XY(xy_data.x3, xy_data.y3);
+
+ if (rev_x)
+ xy_data.x3 = INVERT_X(xy_data.x3,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y3 = INVERT_X(xy_data.y3,
+ ts->platform_data->maxy);
+
+ id = GET_TOUCH3_ID(xy_data.touch34_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH3_IDX][CY_XPOS] = xy_data.x3;
+ trc.cur_mt_pos[CY_MT_TCH3_IDX][CY_YPOS] = xy_data.y3;
+ trc.cur_mt_z[CY_MT_TCH3_IDX] = xy_data.z3;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x3;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y3;
+ trc.cur_mt_z[id] = xy_data.z3;
+ }
+ trc.cur_mt_tch[CY_MT_TCH3_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x3;
+ trc.st_y1 = xy_data.y3;
+ trc.st_z1 = xy_data.z3;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x3;
+ trc.st_y2 = xy_data.y3;
+ trc.st_z2 = xy_data.z3;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: 3rd XYZ:% 3d,% 3d,% 3d ID:% 2d\n",
+ __func__, xy_data.x3, xy_data.y3, xy_data.z3,
+ ((xy_data.touch34_id >> 4) & 0x0F));)
+
+ /* do not break */
+ case 2:
+ xy_data.x2 = be16_to_cpu(xy_data.x2);
+ xy_data.y2 = be16_to_cpu(xy_data.y2);
+ if (tilt)
+ FLIP_XY(xy_data.x2, xy_data.y2);
+
+ if (rev_x)
+ xy_data.x2 = INVERT_X(xy_data.x2,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y2 = INVERT_X(xy_data.y2,
+ ts->platform_data->maxy);
+ id = GET_TOUCH2_ID(xy_data.touch12_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH2_IDX][CY_XPOS] = xy_data.x2;
+ trc.cur_mt_pos[CY_MT_TCH2_IDX][CY_YPOS] = xy_data.y2;
+ trc.cur_mt_z[CY_MT_TCH2_IDX] = xy_data.z2;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x2;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y2;
+ trc.cur_mt_z[id] = xy_data.z2;
+ }
+ trc.cur_mt_tch[CY_MT_TCH2_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x2;
+ trc.st_y1 = xy_data.y2;
+ trc.st_z1 = xy_data.z2;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x2;
+ trc.st_y2 = xy_data.y2;
+ trc.st_z2 = xy_data.z2;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: 2nd XYZ:% 3d,% 3d,% 3d ID:% 2d\n",
+ __func__, xy_data.x2, xy_data.y2, xy_data.z2,
+ (xy_data.touch12_id & 0x0F));)
+
+ /* do not break */
+ case 1:
+ xy_data.x1 = be16_to_cpu(xy_data.x1);
+ xy_data.y1 = be16_to_cpu(xy_data.y1);
+ if (tilt)
+ FLIP_XY(xy_data.x1, xy_data.y1);
+
+ if (rev_x)
+ xy_data.x1 = INVERT_X(xy_data.x1,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y1 = INVERT_X(xy_data.y1,
+ ts->platform_data->maxy);
+
+ id = GET_TOUCH1_ID(xy_data.touch12_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH1_IDX][CY_XPOS] = xy_data.x1;
+ trc.cur_mt_pos[CY_MT_TCH1_IDX][CY_YPOS] = xy_data.y1;
+ trc.cur_mt_z[CY_MT_TCH1_IDX] = xy_data.z1;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x1;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y1;
+ trc.cur_mt_z[id] = xy_data.z1;
+ }
+ trc.cur_mt_tch[CY_MT_TCH1_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x1;
+ trc.st_y1 = xy_data.y1;
+ trc.st_z1 = xy_data.z1;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x1;
+ trc.st_y2 = xy_data.y1;
+ trc.st_z2 = xy_data.z1;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: S1st XYZ:% 3d,% 3d,% 3d ID:% 2d\n",
+ __func__, xy_data.x1, xy_data.y1, xy_data.z1,
+ ((xy_data.touch12_id >> 4) & 0x0F));)
+
+ break;
+ case 0:
+ default:
+ break;
+ }
+
+ if (ts->platform_data->use_st)
+ handle_single_touch(&xy_data, &trc, ts);
+
+ if (ts->platform_data->use_mt)
+ handle_multi_touch(&trc, ts);
+
+ /* handle gestures */
+ if (ts->platform_data->use_gestures && xy_data.gest_id) {
+ input_report_key(ts->input, BTN_3, CY_TCH);
+ input_report_abs(ts->input, ABS_HAT1X, xy_data.gest_id);
+ input_report_abs(ts->input, ABS_HAT1Y, xy_data.gest_cnt);
+ }
+
+ /* signal the view motion event */
+ input_sync(ts->input);
+
+ /* update platform data for the current multi-touch information */
+ memcpy(ts->act_trk, trc.cur_trk, CY_NUM_TRK_ID);
+
+exit_xy_worker:
+ /* reset the watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ DBG(printk(KERN_INFO"%s: finished.\n", __func__);)
+ return;
+}
+
+static irqreturn_t cyttsp_irq(int irq, void *handle)
+{
+ struct cyttsp *ts = (struct cyttsp *)handle;
+
+ DBG(printk(KERN_INFO"%s: Got IRQ!\n", __func__);)
+ switch (ts->platform_data->power_state) {
+ case CY_BL_STATE:
+ case CY_SYSINFO_STATE:
+ complete(&ts->int_running);
+ break;
+ case CY_LDR_STATE:
+ ts->bl_ready = true;
+ break;
+ case CY_ACTIVE_STATE:
+ cyttsp_xy_worker(ts);
+ break;
+ case CY_READY_STATE:
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
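+
+/*
+ * Note (illustrative summary): cyttsp_irq() dispatches on power_state --
+ * CY_BL_STATE and CY_SYSINFO_STATE complete the int_running completion that
+ * cyttsp_soft_reset()/cyttsp_set_sysinfo_mode() wait on, CY_LDR_STATE only
+ * flags bl_ready for the firmware loader, and CY_ACTIVE_STATE runs the
+ * normal cyttsp_xy_worker() reporting path.
+ */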
+
+/* Timer function used as watchdog */
+static void cyttsp_timer(unsigned long handle)
+{
+ struct cyttsp *ts = (struct cyttsp *)handle;
+
+ DBG(printk(KERN_INFO"%s: Watchdog timer event\n", __func__);)
+ if (!work_pending(&ts->work))
+ schedule_work(&ts->work);
+ return;
+}
+/*
+ ************************************************************************
+ * Probe initialization functions
+ ************************************************************************
+ */
+static int cyttsp_load_bl_regs(struct cyttsp *ts)
+{
+ int retval;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ memset(&ts->bl_data, 0, sizeof(ts->bl_data));
+
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(ts->bl_data), &(ts->bl_data));
+
+ if (retval < 0) {
+ DBG2(printk(KERN_INFO "%s: Failed reading block data, err:%d\n",
+ __func__, retval);)
+ goto fail;
+ }
+
+ DBG({
+ int i;
+ u8 *bl_data = (u8 *)&(ts->bl_data);
+ for (i = 0; i < sizeof(struct cyttsp_bootloader_data); i++)
+ printk(KERN_INFO "%s bl_data[%d]=0x%x\n",
+ __func__, i, bl_data[i]);
+ })
+ DBG2(printk(KERN_INFO "%s: bl f=%02X s=%02X e=%02X\n",
+ __func__,
+ ts->bl_data.bl_file,
+ ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+
+ return 0;
+fail:
+ return retval;
+}
+
+static bool cyttsp_bl_app_valid(struct cyttsp *ts)
+{
+ int retval;
+
+ ts->bl_data.bl_status = 0x00;
+
+ retval = cyttsp_load_bl_regs(ts);
+
+ if (retval < 0)
+ return false;
+
+ if (ts->bl_data.bl_status == 0x11) {
+ printk(KERN_INFO "%s: App found; normal boot\n", __func__);
+ return true;
+ } else if (ts->bl_data.bl_status == 0x10) {
+ printk(KERN_INFO "%s: NO APP; load firmware!!\n", __func__);
+ return false;
+ } else {
+ if (ts->bl_data.bl_file == CY_OPERATE_MODE) {
+ int invalid_op_mode_status;
+ invalid_op_mode_status = ts->bl_data.bl_status & 0x3f;
+ if (invalid_op_mode_status)
+ return false;
+ else {
+ if (ts->platform_data->power_state ==
+ CY_ACTIVE_STATE)
+ printk(KERN_INFO "%s: Operational\n",
+ __func__);
+ else
+ printk(KERN_INFO "%s: No bootloader\n",
+ __func__);
+ }
+ }
+ return true;
+ }
+}
+
+static int cyttsp_exit_bl_mode(struct cyttsp *ts)
+{
+ int retval;
+ int tries = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* check if in bootloader mode;
+ * if in operational mode then just return without fail
+ */
+ cyttsp_load_bl_regs(ts);
+ if (!GET_BOOTLOADERMODE(ts->bl_data.bl_status))
+ return 0;
+
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(bl_cmd),
+ (void *)bl_cmd);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Failed writing block data, err:%d\n",
+ __func__, retval);
+ goto fail;
+ }
+ do {
+ msleep(20);
+ cyttsp_load_bl_regs(ts);
+ } while (GET_BOOTLOADERMODE(ts->bl_data.bl_status) && tries++ < 10);
+
+ DBG2(printk(KERN_INFO "%s: read tries=%d\n", __func__, tries);)
+
+ DBG(printk(KERN_INFO"%s: Exit "
+ "f=%02X s=%02X e=%02X\n",
+ __func__,
+ ts->bl_data.bl_file, ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+
+ return 0;
+fail:
+ return retval;
+}
+
+static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
+{
+ int retval;
+ u8 cmd = CY_SYSINFO_MODE;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ memset(&(ts->sysinfo_data), 0, sizeof(struct cyttsp_sysinfo_data));
+
+ /* switch to sysinfo mode */
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Failed writing block data, err:%d\n",
+ __func__, retval);
+ goto exit_sysinfo_mode;
+ }
+
+ if (!(retval < 0)) {
+ /* wait for interrupt to set ready completion */
+ ts->platform_data->power_state = CY_SYSINFO_STATE;
+ INIT_COMPLETION(ts->int_running);
+ retval = wait_for_completion_interruptible_timeout(
+ &ts->int_running,
+ msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX));
+ ts->platform_data->power_state = CY_READY_STATE;
+ }
+
+ if (retval < 0) {
+ ts->platform_data->power_state = CY_IDLE_STATE;
+ printk(KERN_ERR "%s: reset timeout fail (ret=%d)\n",
+ __func__, retval);
+ }
+
+exit_sysinfo_mode:
+ /* read sysinfo registers */
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(ts->sysinfo_data), &(ts->sysinfo_data));
+
+ DBG(printk(KERN_INFO"%s:SI2: hst_mode=0x%02X mfg_cmd=0x%02X "
+ "mfg_stat=0x%02X\n", __func__, ts->sysinfo_data.hst_mode,
+ ts->sysinfo_data.mfg_cmd,
+ ts->sysinfo_data.mfg_stat);)
+
+ DBG(printk(KERN_INFO"%s:SI2: bl_ver=0x%02X%02X\n",
+ __func__, ts->sysinfo_data.bl_verh, ts->sysinfo_data.bl_verl);)
+
+ DBG(printk(KERN_INFO"%s:SI2: sysinfo act_intrvl=0x%02X "
+ "tch_tmout=0x%02X lp_intrvl=0x%02X\n",
+ __func__, ts->sysinfo_data.act_intrvl,
+ ts->sysinfo_data.tch_tmout,
+ ts->sysinfo_data.lp_intrvl);)
+
+ printk(KERN_INFO"%s:SI2:tts_ver=%02X%02X app_id=%02X%02X "
+ "app_ver=%02X%02X c_id=%02X%02X%02X "
+ "u_id=%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ __func__,
+ ts->sysinfo_data.tts_verh, ts->sysinfo_data.tts_verl,
+ ts->sysinfo_data.app_idh, ts->sysinfo_data.app_idl,
+ ts->sysinfo_data.app_verh, ts->sysinfo_data.app_verl,
+ ts->sysinfo_data.cid[0], ts->sysinfo_data.cid[1],
+ ts->sysinfo_data.cid[2],
+ ts->sysinfo_data.uid[0], ts->sysinfo_data.uid[1],
+ ts->sysinfo_data.uid[2], ts->sysinfo_data.uid[3],
+ ts->sysinfo_data.uid[4], ts->sysinfo_data.uid[5],
+ ts->sysinfo_data.uid[6], ts->sysinfo_data.uid[7]);
+
+ return retval;
+}
+
+static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
+{
+ int retval = 0;
+
+ if (ts->platform_data->scn_typ != CY_SCN_TYP_DFLT) {
+ u8 scn_typ = ts->platform_data->scn_typ;
+
+ retval = ttsp_write_block_data(ts,
+ CY_REG_SCN_TYP,
+ sizeof(scn_typ), &scn_typ);
+ }
+
+ if (retval < 0)
+ return retval;
+
+ if ((ts->platform_data->act_intrvl != CY_ACT_INTRVL_DFLT) ||
+ (ts->platform_data->tch_tmout != CY_TCH_TMOUT_DFLT) ||
+ (ts->platform_data->lp_intrvl != CY_LP_INTRVL_DFLT)) {
+
+ u8 intrvl_ray[3];
+
+ intrvl_ray[0] = ts->platform_data->act_intrvl;
+ intrvl_ray[1] = ts->platform_data->tch_tmout;
+ intrvl_ray[2] = ts->platform_data->lp_intrvl;
+
+ /* set intrvl registers */
+ retval = ttsp_write_block_data(ts,
+ CY_REG_ACT_INTRVL,
+ sizeof(intrvl_ray), intrvl_ray);
+
+ msleep(CY_DELAY_SYSINFO);
+ }
+
+ return retval;
+}
+
+static int cyttsp_set_operational_mode(struct cyttsp *ts)
+{
+ int retval;
+ int tries;
+ u8 cmd = CY_OPERATE_MODE;
+ u8 gest_default;
+ struct cyttsp_xydata xy_data;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Failed writing block data, err:%d\n",
+ __func__, retval);
+ goto write_block_data_fail;
+ }
+
+ /* wait for TTSP Device to complete switch to Operational mode */
+ msleep(20);
+
+ tries = 0;
+ gest_default =
+ CY_GEST_GRP1 + CY_GEST_GRP2 +
+ CY_GEST_GRP3 + CY_GEST_GRP4 +
+ CY_ACT_DIST_DFLT;
+ tries = 0;
+ do {
+ msleep(CY_DELAY_DFLT);
+ memset(&xy_data, 0, sizeof(xy_data));
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(struct cyttsp_xydata), &xy_data);
+ } while (!((retval == 0) &&
+ ((xy_data.gest_set & CY_ACT_DIST_BITS) ==
+ (CY_ACT_DIST_DFLT & CY_ACT_DIST_BITS))) &&
+ (tries++ < CY_DELAY_MAX));
+
+ DBG2(printk(KERN_INFO "%s: read tries=%d\n", __func__, tries);)
+
+ return 0;
+write_block_data_fail:
+ return retval;
+}
+
+static int cyttsp_soft_reset(struct cyttsp *ts)
+{
+ int retval;
+ u8 cmd = CY_SOFT_RESET_MODE;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* reset TTSP Device back to bootloader mode */
+ ts->platform_data->power_state = CY_BL_STATE;
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
+
+ if (!(retval < 0)) {
+ /* wait for interrupt to set ready completion */
+ INIT_COMPLETION(ts->int_running);
+ retval = wait_for_completion_interruptible_timeout(
+ &ts->int_running,
+ msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX));
+ }
+
+ if (retval < 0) {
+ ts->platform_data->power_state = CY_IDLE_STATE;
+ printk(KERN_ERR "%s: reset timeout fail (ret=%d)\n",
+ __func__, retval);
+ }
+
+ if (retval > 0)
+ retval = 0;
+
+ return retval;
+}
+
+static int cyttsp_gesture_setup(struct cyttsp *ts)
+{
+ int retval;
+ u8 gesture_setup;
+
+ DBG(printk(KERN_INFO"%s: Init gesture; active distance setup\n",
+ __func__);)
+
+ gesture_setup = ts->platform_data->gest_set;
+ retval = ttsp_write_block_data(ts, CY_REG_GEST_SET,
+ sizeof(gesture_setup), &gesture_setup);
+
+ return retval;
+}
+
+#ifdef CY_INCLUDE_LOAD_RECS
+#include "cyttsp_ldr.h"
+#else
+static bool cyttsp_bl_status(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ return ((ts->bl_data.bl_status == 0x10) ||
+ (ts->bl_data.bl_status == 0x11));
+}
+
+static int cyttsp_loader(struct cyttsp *ts)
+{
+ void *fptr = cyttsp_bl_status; /* kill warning */
+
+ if (ts) {
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ DBG(printk(KERN_INFO"%s: Exit\n", __func__);)
+ }
+
+ if (!fptr)
+ return -EIO;
+ else
+ return 0;
+}
+#endif
+
+static int cyttsp_power_on(struct cyttsp *ts)
+{
+ int retval = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* (re-)start watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+
+ cyttsp_init_tch(ts);
+ retval = cyttsp_soft_reset(ts);
+ DBG2(printk(KERN_INFO"%s: after softreset r=%d\n", __func__, retval);)
+ if (retval < 0)
+ goto bypass;
+
+ if (ts->platform_data->use_load_file)
+ retval = cyttsp_loader(ts);
+
+ if (!cyttsp_bl_app_valid(ts)) {
+ retval = 1;
+ goto bypass;
+ }
+
+ retval = cyttsp_exit_bl_mode(ts);
+ DBG2(printk(KERN_INFO"%s: after exit bl r=%d\n", __func__, retval);)
+
+ if (retval < 0)
+ goto bypass;
+
+ /* switch to System Information mode to read */
+ /* versions and set interval registers */
+ ts->platform_data->power_state = CY_READY_STATE;
+ retval = cyttsp_set_sysinfo_mode(ts);
+ if (retval < 0)
+ goto bypass;
+
+ retval = cyttsp_set_sysinfo_regs(ts);
+ if (retval < 0)
+ goto bypass;
+
+ /* switch back to Operational mode */
+ DBG2(printk(KERN_INFO"%s: switch back to operational mode\n",
+ __func__);)
+ retval = cyttsp_set_operational_mode(ts);
+ if (retval < 0)
+ goto bypass;
+
+ /* init gesture setup; required for active distance */
+ cyttsp_gesture_setup(ts);
+
+bypass:
+ if (retval)
+ ts->platform_data->power_state = CY_IDLE_STATE;
+ else
+ ts->platform_data->power_state = CY_ACTIVE_STATE;
+
+ printk(KERN_INFO"%s: Power state is %s\n",
+ __func__, (ts->platform_data->power_state == CY_ACTIVE_STATE) ?
+ "ACTIVE" : "IDLE");
+ return retval;
+}
+
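+/* Watchdog worker: re-read the device mode and re-initialize on a mode error */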
+static void cyttsp_check_bl(struct work_struct *work)
+{
+ struct cyttsp *ts = container_of(work, struct cyttsp, work);
+ s32 retval;
+ struct cyttsp_xydata xy_data;
+
+ retval = ttsp_read_block_data(ts, CY_REG_BASE, 2, &xy_data);
+ if (retval < 0) {
+		printk(KERN_ERR "%s: Error, failed to read device\n", __func__);
+ goto reserve_next;
+ }
+
+ if (CY_MODE_ERROR(ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode)) {
+ printk(KERN_ERR "%s: Error, mode error detected "
+ "on watchdog timeout ps=%d mode=%02X bl_mode=%02X\n",
+ __func__, ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode);
+ retval = cyttsp_power_on(ts);
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n", __func__);
+ }
+
+reserve_next:
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+}
+
+static int cyttsp_resume(struct cyttsp *ts)
+{
+ int retval = 0;
+ struct cyttsp_xydata xydata;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ if (ts->platform_data->use_sleep && (ts->platform_data->power_state !=
+ CY_ACTIVE_STATE)) {
+ if (ts->platform_data->wakeup) {
+ retval = ts->platform_data->wakeup();
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, wakeup failed!\n",
+ __func__);
+ } else {
+ printk(KERN_ERR "%s: Error, wakeup not implemented "
+ "(check board file).\n", __func__);
+ retval = -ENOSYS;
+ }
+ if (!(retval < 0)) {
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(xydata), &xydata);
+ if (!(retval < 0) && !GET_HSTMODE(xydata.hst_mode))
+ ts->platform_data->power_state =
+ CY_ACTIVE_STATE;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: Wake Up %s\n", __func__,
+ (retval < 0) ? "FAIL" : "PASS");)
+ return retval;
+}
+
+static int cyttsp_suspend(struct cyttsp *ts)
+{
+ u8 sleep_mode = 0;
+ int retval = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ if (ts->platform_data->use_sleep &&
+ (ts->platform_data->power_state == CY_ACTIVE_STATE)) {
+ sleep_mode = CY_DEEP_SLEEP_MODE;
+ retval = ttsp_write_block_data(ts,
+ CY_REG_BASE, sizeof(sleep_mode), &sleep_mode);
+ if (!(retval < 0))
+ ts->platform_data->power_state = CY_SLEEP_STATE;
+ msleep(CY_MODE_CHANGE_DELAY);
+ }
+ DBG(printk(KERN_INFO"%s: Sleep Power state is %s\n", __func__,
+ (ts->platform_data->power_state == CY_ACTIVE_STATE) ?
+ "ACTIVE" :
+ ((ts->platform_data->power_state == CY_SLEEP_STATE) ?
+ "SLEEP" : "LOW POWER"));)
+ return retval;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cyttsp_ts_early_suspend(struct early_suspend *h)
+{
+ struct cyttsp *ts = container_of(h, struct cyttsp, early_suspend);
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode) {
+ disable_irq_nosync(ts->irq);
+ ts->suspended = 1;
+ /* kill watchdog */
+ del_timer(&ts->timer);
+ cancel_work_sync(&ts->work);
+ cyttsp_suspend(ts);
+ }
+ regulator_disable(ts->regulator);
+ UNLOCK(ts->mutex);
+}
+
+static void cyttsp_ts_late_resume(struct early_suspend *h)
+{
+ struct cyttsp *ts = container_of(h, struct cyttsp, early_suspend);
+
+ regulator_enable(ts->regulator);
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode && ts->suspended) {
+ ts->suspended = 0;
+ if (cyttsp_resume(ts) < 0)
+ printk(KERN_ERR "%s: Error, cyttsp_resume.\n",
+ __func__);
+ enable_irq(ts->irq);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ UNLOCK(ts->mutex);
+}
+#endif
+
+static int cyttsp_wr_reg(struct cyttsp *ts, u8 reg_id, u8 reg_data)
+{
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ return ttsp_write_block_data(ts,
+ CY_REG_BASE + reg_id, sizeof(u8), &reg_data);
+}
+
+static int cyttsp_rd_reg(struct cyttsp *ts, u8 reg_id, u8 *reg_data)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ return ttsp_read_block_data(ts,
+ CY_REG_BASE + reg_id, sizeof(u8), reg_data);
+}
+
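+/*
+ * sysfs "firmware" write: outside FW-loader mode the 16-bit value selects a
+ * register (high byte; bit 7 set means "select for read only") and carries
+ * the data in the low byte.  In loader mode the whole payload is written to
+ * the bootloader at CY_REG_BASE, with retries.
+ */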
+static ssize_t firmware_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cyttsp *ts = dev_get_drvdata(dev);
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode) {
+ unsigned short val = *(unsigned short *)buf;
+ u8 reg_data = val & 0xff;
+ ts->reg_id = (val & 0xff00) >> 8;
+ if (!(ts->reg_id & 0x80)) {
+ /* write user specified operational register */
+ if (ts->reg_id == 0x00) {
+ switch (GET_HSTMODE(reg_data)) {
+ case GET_HSTMODE(CY_OPERATE_MODE):
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ ts->platform_data->power_state =
+ CY_ACTIVE_STATE;
+ enable_irq(ts->irq);
+					mod_timer(&ts->timer,
+						  jiffies + CY_WDG_TIMEOUT);
+ printk(KERN_INFO "%s: "
+ "Switch to Operational Mode "
+ "ps=%d\n", __func__,
+ ts->platform_data->power_state);
+ break;
+ case GET_HSTMODE(CY_SYSINFO_MODE):
+ ts->platform_data->power_state =
+ CY_READY_STATE;
+ disable_irq_nosync(ts->irq);
+ /* kill watchdog */
+ del_timer(&ts->timer);
+ cancel_work_sync(&ts->work);
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ printk(KERN_INFO "%s: "
+ "Switch to SysInfo Mode "
+ "ps=%d\n", __func__,
+ ts->platform_data->power_state);
+ break;
+ default:
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ break;
+ }
+ } else
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ printk(KERN_INFO "%s: "
+ "write(reg=%02X(%d) dat=0x%02X(%d))\n",
+ __func__,
+ ts->reg_id & ~0x80, ts->reg_id & ~0x80,
+ reg_data, reg_data);
+ } else {
+ /* save user specified operational read register */
+ DBG2(printk(KERN_INFO "%s: read(r=0x%02X)\n",
+ __func__, ts->reg_id);)
+ }
+ } else {
+ int retval = 0;
+ int tries = 0;
+ DBG({
+ char str[128];
+ char *p = str;
+ int i;
+ for (i = 0; i < size; i++, p += 2)
+ sprintf(p, "%02x", (unsigned char)buf[i]);
+			printk(KERN_DEBUG "%s: size %zu, pos %ld payload %s\n",
+ __func__, size, (long)pos, str);
+ })
+ do {
+ retval = ttsp_write_block_data(ts,
+ CY_REG_BASE, size, buf);
+ if (retval < 0)
+ msleep(500);
+ } while ((retval < 0) && (tries++ < 10));
+ }
+ UNLOCK(ts->mutex);
+ return size;
+}
+
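+/*
+ * sysfs "firmware" read: outside FW-loader mode return the register selected
+ * by a previous write; in loader mode return the bootloader status and error.
+ */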
+static ssize_t firmware_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *ba,
+ char *buf, loff_t pos, size_t size)
+{
+ int count = 0;
+ u8 reg_data;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cyttsp *ts = dev_get_drvdata(dev);
+
+ DBG2(printk(KERN_INFO"%s: Enter (mode=%d)\n",
+ __func__, ts->fw_loader_mode);)
+
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode) {
+ /* read user specified operational register */
+ cyttsp_rd_reg(ts, ts->reg_id & ~0x80, &reg_data);
+ *(unsigned short *)buf = reg_data << 8;
+ count = sizeof(unsigned short);
+ printk(KERN_INFO "%s: read(reg=%02X(%d) dat=0x%02X(%d))\n",
+ __func__, ts->reg_id & ~0x80, ts->reg_id & ~0x80,
+ reg_data, reg_data);
+ } else {
+ int retval = 0;
+ int tries = 0;
+
+ do {
+ retval = cyttsp_load_bl_regs(ts);
+ if (retval < 0)
+ msleep(500);
+ } while ((retval < 0) && (tries++ < 10));
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: error reading status\n", __func__);
+ count = 0;
+ } else {
+ *(unsigned short *)buf = ts->bl_data.bl_status << 8 |
+ ts->bl_data.bl_error;
+ count = sizeof(unsigned short);
+ }
+
+ DBG2(printk(KERN_INFO
+ "%s:bl_f=0x%02X bl_s=0x%02X bl_e=0x%02X\n",
+ __func__,
+ ts->bl_data.bl_file,
+ ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+ }
+ UNLOCK(ts->mutex);
+ return count;
+}
+
+static struct bin_attribute cyttsp_firmware = {
+ .attr = {
+ .name = "firmware",
+ .mode = 0644,
+ },
+ .size = 128,
+ .read = firmware_read,
+ .write = firmware_write,
+};
+
+static ssize_t attr_fwloader_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyttsp *ts = dev_get_drvdata(dev);
+ return sprintf(buf, "0x%02X%02X 0x%02X%02X 0x%02X%02X 0x%02X%02X%02X\n",
+ ts->sysinfo_data.tts_verh, ts->sysinfo_data.tts_verl,
+ ts->sysinfo_data.app_idh, ts->sysinfo_data.app_idl,
+ ts->sysinfo_data.app_verh, ts->sysinfo_data.app_verl,
+ ts->sysinfo_data.cid[0], ts->sysinfo_data.cid[1],
+ ts->sysinfo_data.cid[2]);
+}
+
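+/*
+ * fwloader control values: 1 = enter loader mode, 0 = leave loader mode and
+ * re-initialize the device, 2 = expose the firmware file for register r/w,
+ * 3 = remove it again.
+ */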
+static ssize_t attr_fwloader_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ char *p;
+ int ret;
+ struct cyttsp *ts = dev_get_drvdata(dev);
+ unsigned val = simple_strtoul(buf, &p, 10);
+
+ ret = p - buf;
+ if (*p && isspace(*p))
+ ret++;
+ DBG2(printk(KERN_DEBUG "%s: %u\n", __func__, val);)
+
+ LOCK(ts->mutex)
+ if (val == 3) {
+ sysfs_remove_bin_file(&dev->kobj, &cyttsp_firmware);
+ DBG2(printk(KERN_INFO "%s: FW loader closed for reg r/w\n",
+ __func__);)
+ } else if (val == 2) {
+ if (sysfs_create_bin_file(&dev->kobj, &cyttsp_firmware))
+ printk(KERN_ERR "%s: unable to create file\n",
+ __func__);
+ DBG2(printk(KERN_INFO "%s: FW loader opened for reg r/w\n",
+ __func__);)
+ if (ts->suspended) {
+ cyttsp_resume(ts);
+ ts->suspended = 0;
+ enable_irq(ts->irq);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ } else if ((val == 1) && !ts->fw_loader_mode) {
+ ts->fw_loader_mode = 1;
+ if (ts->suspended) {
+ cyttsp_resume(ts);
+ ts->suspended = 0;
+ enable_irq(ts->irq);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ disable_irq_nosync(ts->irq);
+ /* kill watchdog */
+ del_timer(&ts->timer);
+ cancel_work_sync(&ts->work);
+ if (sysfs_create_bin_file(&dev->kobj, &cyttsp_firmware))
+ printk(KERN_ERR "%s: unable to create file\n",
+ __func__);
+ DBG2(printk(KERN_INFO
+ "%s: FW loader opened for start load: ps=%d mode=%d\n",
+ __func__,
+ ts->platform_data->power_state, ts->fw_loader_mode);)
+ cyttsp_soft_reset(ts);
+ printk(KERN_INFO "%s: FW loader started.\n", __func__);
+ ts->platform_data->power_state = CY_LDR_STATE;
+ } else if (!val && ts->fw_loader_mode) {
+ sysfs_remove_bin_file(&dev->kobj, &cyttsp_firmware);
+ ts->fw_loader_mode = 0;
+ printk(KERN_INFO "%s: FW loader finished.\n", __func__);
+ enable_irq(ts->irq);
+ ret = cyttsp_power_on(ts);
+ if (ret < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n", __func__);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ UNLOCK(ts->mutex);
+ return ret == size ? ret : -EINVAL;
+}
+
+int cyttsp_reset_controller(struct cyttsp *ts)
+{
+ int ret;
+
+ ret = gpio_request(ts->platform_data->rst_gpio, "reset_pin");
+ if (ret) {
+		printk(KERN_ERR "reset gpio request failed\n");
+ return ret;
+ }
+ ret = gpio_direction_output(ts->platform_data->rst_gpio, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "reset gpio direction failed\n");
+ goto out;
+ }
+	/*
+	 * Start-up procedure:
+	 * drive the RESET pin low, wait 1 millisecond,
+	 * drive it high again, wait 5 milliseconds,
+	 * then run the initialization sequence.
+	 */
+ gpio_set_value(ts->platform_data->rst_gpio, 0);
+ mdelay(1);
+ gpio_set_value(ts->platform_data->rst_gpio, 1);
+ mdelay(5);
+out:
+ gpio_free(ts->platform_data->rst_gpio);
+	return ret;
+}
+
+static struct device_attribute fwloader =
+ __ATTR(fwloader, 0644, attr_fwloader_show, attr_fwloader_store);
+
+void *cyttsp_core_init(struct cyttsp_bus_ops *bus_ops, struct device *pdev)
+{
+ struct input_dev *input_device;
+ struct cyttsp *ts;
+ int retval = 0, ret = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ printk(KERN_ERR "%s: Error, kzalloc\n", __func__);
+ goto error_alloc_data_failed;
+ }
+ mutex_init(&ts->mutex);
+ ts->pdev = pdev;
+ ts->platform_data = pdev->platform_data;
+ ts->bus_ops = bus_ops;
+ init_completion(&ts->int_running);
+
+ if (ts->platform_data->init)
+ retval = ts->platform_data->init(1);
+ if (retval) {
+ printk(KERN_ERR "%s: platform init failed!\n", __func__);
+ goto error_init;
+ }
+ ret = cyttsp_reset_controller(ts);
+ if (ret < 0) {
+ printk(KERN_ERR "controller reset failed\n");
+ goto error_reset;
+ }
+
+ /* Create the input device and register it. */
+ input_device = input_allocate_device();
+ if (!input_device) {
+ retval = -ENOMEM;
+ printk(KERN_ERR "%s: Error, failed to allocate input device\n",
+ __func__);
+ goto error_input_allocate_device;
+ }
+
+ ts->input = input_device;
+ input_device->name = ts->platform_data->name;
+ input_device->phys = ts->phys;
+ input_device->dev.parent = ts->pdev;
+
+ /* enable interrupts */
+ ts->irq = gpio_to_irq(ts->platform_data->irq_gpio);
+ retval = request_threaded_irq(ts->irq, NULL, cyttsp_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ts->input->name, ts);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: IRQ request failed r=%d\n",
+ __func__, retval);
+ ts->platform_data->power_state = CY_INVALID_STATE;
+ goto error_set_irq;
+ }
+ /* setup watchdog */
+ INIT_WORK(&ts->work, cyttsp_check_bl);
+ setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts);
+
+ DBG(printk(KERN_INFO "%s: call power_on\n", __func__);)
+ retval = cyttsp_power_on(ts);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, power on failed!\n", __func__);
+ goto error_power_on;
+ }
+
+ set_bit(EV_SYN, input_device->evbit);
+ set_bit(EV_KEY, input_device->evbit);
+ set_bit(EV_ABS, input_device->evbit);
+ set_bit(BTN_TOUCH, input_device->keybit);
+ set_bit(BTN_2, input_device->keybit);
+ if (ts->platform_data->use_gestures)
+ set_bit(BTN_3, input_device->keybit);
+
+ input_set_abs_params(input_device, ABS_X, 0, ts->platform_data->maxx,
+ 0, 0);
+ input_set_abs_params(input_device, ABS_Y, 0, ts->platform_data->maxy,
+ 0, 0);
+ input_set_abs_params(input_device, ABS_TOOL_WIDTH, 0,
+ CY_LARGE_TOOL_WIDTH, 0, 0);
+ input_set_abs_params(input_device, ABS_PRESSURE, 0, CY_MAXZ, 0, 0);
+ input_set_abs_params(input_device, ABS_HAT0X, 0,
+ ts->platform_data->maxx, 0, 0);
+ input_set_abs_params(input_device, ABS_HAT0Y, 0,
+ ts->platform_data->maxy, 0, 0);
+ if (ts->platform_data->use_gestures) {
+ input_set_abs_params(input_device, ABS_HAT1X, 0, CY_MAXZ,
+ 0, 0);
+ input_set_abs_params(input_device, ABS_HAT1Y, 0, CY_MAXZ,
+ 0, 0);
+ }
+ if (ts->platform_data->use_mt) {
+ input_set_abs_params(input_device, ABS_MT_POSITION_X, 0,
+ ts->platform_data->maxx, 0, 0);
+ input_set_abs_params(input_device, ABS_MT_POSITION_Y, 0,
+ ts->platform_data->maxy, 0, 0);
+ input_set_abs_params(input_device, ABS_MT_TOUCH_MAJOR, 0,
+ CY_MAXZ, 0, 0);
+ input_set_abs_params(input_device, ABS_MT_WIDTH_MAJOR, 0,
+ CY_LARGE_TOOL_WIDTH, 0, 0);
+ if (ts->platform_data->use_trk_id)
+ input_set_abs_params(input_device, ABS_MT_TRACKING_ID,
+ 0, CY_NUM_TRK_ID, 0, 0);
+ }
+
+ if (ts->platform_data->use_virtual_keys)
+ input_set_capability(input_device, EV_KEY, KEY_PROG1);
+
+ retval = input_register_device(input_device);
+ if (retval) {
+ printk(KERN_ERR "%s: Error, failed to register input device\n",
+ __func__);
+		/* not yet registered: free, do not unregister, the device */
+		goto error_power_on;
+ }
+ DBG(printk(KERN_INFO "%s: Registered input device %s\n",
+ __func__, input_device->name);)
+ ts->regulator = regulator_get(ts->pdev, "vcpin");
+ if (IS_ERR(ts->regulator)) {
+ printk(KERN_ERR "%s: Error, regulator_get failed\n", __func__);
+ ts->regulator = NULL;
+ goto error_input_register_device;
+ } else {
+ ret = regulator_enable(ts->regulator);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: regulator enable failed\n",
+ __func__);
+ goto out_regulator_error;
+ }
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = cyttsp_ts_early_suspend;
+ ts->early_suspend.resume = cyttsp_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+ retval = device_create_file(pdev, &fwloader);
+ if (retval) {
+ printk(KERN_ERR "%s: Error, could not create attribute\n",
+ __func__);
+ goto device_create_error;
+ }
+ dev_set_drvdata(pdev, ts);
+
+ return ts;
+
+device_create_error:
+ regulator_disable(ts->regulator);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ts->early_suspend);
+#endif
+out_regulator_error:
+ regulator_put(ts->regulator);
+error_input_register_device:
+	input_unregister_device(input_device);
+	/* unregistering drops the last reference; do not free it again below */
+	input_device = NULL;
+error_power_on:
+ cancel_work_sync(&ts->work);
+ del_timer_sync(&ts->timer);
+ if (ts->irq >= 0)
+ free_irq(ts->irq, ts);
+error_set_irq:
+	if (input_device)
+		input_free_device(input_device);
+error_reset:
+error_input_allocate_device:
+ if (ts->platform_data->init)
+ ts->platform_data->init(0);
+error_init:
+ kfree(ts);
+error_alloc_data_failed:
+ return NULL;
+}
+
+/* registered in driver struct */
+void cyttsp_core_release(void *handle)
+{
+ struct cyttsp *ts = handle;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ts->early_suspend);
+#endif
+	device_remove_file(ts->pdev, &fwloader);
+	del_timer_sync(&ts->timer);
+	cancel_work_sync(&ts->work);
+	free_irq(ts->irq, ts);
+	/* input_unregister_device() drops the last reference; no extra free */
+	input_unregister_device(ts->input);
+	regulator_disable(ts->regulator);
+	regulator_put(ts->regulator);
+	if (ts->platform_data->init)
+		ts->platform_data->init(0);
+ kfree(ts);
+}
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver core");
+MODULE_AUTHOR("Cypress");
+
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
new file mode 100755
index 00000000000..6af486177a0
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -0,0 +1,44 @@
+/* Header file for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * drivers/input/touchscreen/cyttsp_core.h
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+
+#ifndef __CYTTSP_CORE_H__
+#define __CYTTSP_CORE_H__
+
+#include <linux/kernel.h>
+
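+/*
+ * Bus abstraction used by the core: the I2C/SPI adapter layers fill in these
+ * callbacks and hand them to cyttsp_core_init().
+ */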
+struct cyttsp_bus_ops {
+ s32 (*write)(void *handle, u8 addr, u8 length, const void *values);
+ s32 (*read)(void *handle, u8 addr, u8 length, void *values);
+ s32 (*ext)(void *handle, void *values);
+};
+
+void *cyttsp_core_init(struct cyttsp_bus_ops *bus_ops, struct device *pdev);
+void cyttsp_core_release(void *handle);
+
+#endif /* __CYTTSP_CORE_H__ */
diff --git a/drivers/input/touchscreen/cyttsp_ldr.h b/drivers/input/touchscreen/cyttsp_ldr.h
new file mode 100755
index 00000000000..95db89d0d13
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_ldr.h
@@ -0,0 +1,333 @@
+/*
+ * Source for:
+ * Cypress TrueTouch(TM) Standard Product touchscreen driver.
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+/*
+ ************************************************************************
+ * Compiled image bootloader functions
+ ************************************************************************
+ */
+#include "cyttsp_fw.h"
+#define CY_BL_PAGE_SIZE 16
+#define CY_BL_NUM_PAGES 5
+#define CY_MAX_DATA_LEN (CY_BL_PAGE_SIZE * 2)
+
+/* Timeout timer */
+static int cyttsp_check_polling(struct cyttsp *ts)
+{
+ return ts->platform_data->use_timer;
+}
+
+static void cyttsp_to_timer(unsigned long handle)
+{
+ struct cyttsp *ts = (struct cyttsp *)handle;
+
+ DBG(printk(KERN_INFO"%s: TTSP timeout timer event!\n", __func__);)
+ ts->to_timeout = true;
+ return;
+}
+
+static void cyttsp_setup_to_timer(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ setup_timer(&ts->to_timer, cyttsp_to_timer, (unsigned long) ts);
+}
+
+static void cyttsp_kill_to_timer(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ del_timer(&ts->to_timer);
+}
+
+static void cyttsp_start_to_timer(struct cyttsp *ts, int ms)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ ts->to_timeout = false;
+	mod_timer(&ts->to_timer, jiffies + msecs_to_jiffies(ms));
+}
+
+static bool cyttsp_timeout(struct cyttsp *ts)
+{
+ if (cyttsp_check_polling(ts))
+ return false;
+ else
+ return ts->to_timeout;
+}
+
+static void cyttsp_set_bl_ready(struct cyttsp *ts, bool set)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ ts->bl_ready = set;
+ DBG(printk(KERN_INFO"%s: bl_ready=%d\n", __func__, (int)ts->bl_ready);)
+}
+
+static bool cyttsp_check_bl_ready(struct cyttsp *ts)
+{
+ if (cyttsp_check_polling(ts))
+ return true;
+ else
+ return ts->bl_ready;
+}
+
+static bool cyttsp_bl_err_status(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ return (((ts->bl_data.bl_status == 0x10) &&
+ (ts->bl_data.bl_error == 0x20)) ||
+ ((ts->bl_data.bl_status == 0x11) &&
+ (ts->bl_data.bl_error == 0x20)));
+}
+
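+/*
+ * Wait for the bootloader to report ready: poll the bootloader registers in
+ * polling (timer) mode, otherwise watch the irq-driven ready flag against a
+ * timeout timer.  Returns true on timeout.
+ */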
+static bool cyttsp_wait_bl_ready(struct cyttsp *ts,
+ int pre_delay, int loop_delay, int max_try,
+ bool (*done)(struct cyttsp *ts))
+{
+ int tries;
+ bool rdy = false, tmo = false;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ DBG(printk(KERN_INFO"%s: pre-dly=%d loop-dly=%d, max-try=%d\n",
+ __func__, pre_delay, loop_delay, max_try);)
+
+ tries = 0;
+ ts->bl_data.bl_file = 0;
+ ts->bl_data.bl_status = 0;
+ ts->bl_data.bl_error = 0;
+ if (cyttsp_check_polling(ts)) {
+ msleep(pre_delay);
+ do {
+ msleep(abs(loop_delay));
+ cyttsp_load_bl_regs(ts);
+ } while (!done(ts) &&
+ tries++ < max_try);
+ DBG(printk(KERN_INFO"%s: polling mode tries=%d\n",
+ __func__, tries);)
+ } else {
+ cyttsp_start_to_timer(ts, abs(loop_delay) * max_try);
+ while (!rdy && !tmo) {
+ rdy = cyttsp_check_bl_ready(ts);
+ tmo = cyttsp_timeout(ts);
+ if (loop_delay < 0)
+ udelay(abs(loop_delay));
+ else
+ msleep(abs(loop_delay));
+ tries++;
+ }
+ DBG2(printk(KERN_INFO"%s: irq mode tries=%d rdy=%d tmo=%d\n",
+ __func__, tries, (int)rdy, (int)tmo);)
+ cyttsp_load_bl_regs(ts);
+ }
+
+ if (tries >= max_try || tmo)
+ return true; /* timeout */
+ else
+ return false;
+}
+
+static int cyttsp_wr_blk_chunks(struct cyttsp *ts, u8 cmd,
+ u8 length, const u8 *values)
+{
+ int retval = 0;
+ int block = 1;
+ bool timeout;
+
+ u8 dataray[CY_MAX_DATA_LEN];
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* first page already includes the bl page offset */
+ memcpy(dataray, values, CY_BL_PAGE_SIZE + 1);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, cmd, CY_BL_PAGE_SIZE + 1, dataray);
+ values += CY_BL_PAGE_SIZE + 1;
+ length -= CY_BL_PAGE_SIZE + 1;
+ if (retval)
+ return retval;
+
+ /* remaining blocks require bl page offset stuffing */
+ while (length && (block < CY_BL_NUM_PAGES) && !(retval < 0)) {
+ dataray[0] = CY_BL_PAGE_SIZE * block;
+ timeout = cyttsp_wait_bl_ready(ts,
+ 1, -100, 100, cyttsp_bl_err_status);
+ if (timeout)
+ return -EIO;
+ memcpy(&dataray[1], values, length >= CY_BL_PAGE_SIZE ?
+ CY_BL_PAGE_SIZE : length);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, cmd,
+ length >= CY_BL_PAGE_SIZE ?
+ CY_BL_PAGE_SIZE + 1 : length + 1, dataray);
+ values += CY_BL_PAGE_SIZE;
+ length = length >= CY_BL_PAGE_SIZE ?
+ length - CY_BL_PAGE_SIZE : 0;
+ block++;
+ }
+
+ return retval;
+}
+
+static int cyttsp_load_app(struct cyttsp *ts)
+{
+ int retval = 0;
+ int rec;
+ bool timeout;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ printk(KERN_INFO "%s: "
+ "load file - tver=0x%02X%02X a_id=0x%02X%02X aver=0x%02X%02X\n",
+ __func__,
+ cyttsp_fw_tts_verh, cyttsp_fw_tts_verl,
+ cyttsp_fw_app_idh, cyttsp_fw_app_idl,
+ cyttsp_fw_app_verh, cyttsp_fw_app_verl);
+
+ /* download new TTSP Application to the Bootloader */
+ rec = 0;
+
+ /* send bootload initiation command */
+ printk(KERN_INFO"%s: Send BL Enter\n", __func__);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, CY_REG_BASE,
+ cyttsp_fw[rec].Length, cyttsp_fw[rec].Block);
+ rec++;
+ if (retval)
+ return retval;
+ timeout = cyttsp_wait_bl_ready(ts, 1, 100, 100, cyttsp_bl_err_status);
+ DBG(printk(KERN_INFO "%s: BL ENTER f=%02X s=%02X e=%02X t=%d\n",
+ __func__,
+ ts->bl_data.bl_file, ts->bl_data.bl_status,
+ ts->bl_data.bl_error, timeout);)
+ if (timeout)
+ goto loader_exit;
+
+ /* send bootload firmware load blocks */
+ printk(KERN_INFO"%s: Send BL Blocks\n", __func__);
+ while (cyttsp_fw[rec].Command == CY_BL_WRITE_BLK) {
+ DBG2(printk(KERN_INFO "%s:"
+ "BL DNLD Rec=% 3d Len=% 3d Addr=%04X\n",
+ __func__,
+ cyttsp_fw[rec].Record, cyttsp_fw[rec].Length,
+ cyttsp_fw[rec].Address);
+ )
+ retval = cyttsp_wr_blk_chunks(ts, CY_REG_BASE,
+ cyttsp_fw[rec].Length, cyttsp_fw[rec].Block);
+ if (retval < 0) {
+ DBG(printk(KERN_INFO "%s:"
+ "BL fail Rec=%3d retval=%d\n",
+ __func__,
+ cyttsp_fw[rec].Record, retval);
+ )
+ break;
+ } else {
+ cyttsp_wait_bl_ready(ts, 10, 1, 1000,
+ cyttsp_bl_err_status);
+ DBG(printk(KERN_INFO "%s: BL _LOAD "
+ "f=%02X s=%02X e=%02X\n",
+ __func__,
+ ts->bl_data.bl_file, ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+ }
+ rec++;
+ }
+ if (retval < 0)
+ goto loader_exit;
+
+ /* send bootload terminate command */
+ printk(KERN_INFO"%s: Send BL Terminate\n", __func__);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, CY_REG_BASE,
+ cyttsp_fw[rec].Length, cyttsp_fw[rec].Block);
+ if (retval < 0)
+ goto loader_exit;
+ else
+ cyttsp_wait_bl_ready(ts, 1, 100, 100, cyttsp_bl_err_status);
+
+loader_exit:
+ /* reset TTSP Device back to bootloader mode */
+ retval = cyttsp_soft_reset(ts);
+
+ return retval;
+}
+
+static int cyttsp_loader(struct cyttsp *ts)
+{
+ int retval;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ retval = cyttsp_load_bl_regs(ts);
+ if (retval < 0)
+ return retval;
+
+ printk(KERN_INFO "%s:"
+ "blttsp=0x%02X%02X flttsp=0x%02X%02X force=%d\n",
+ __func__,
+ ts->bl_data.ttspver_hi, ts->bl_data.ttspver_lo,
+ cyttsp_fw_tts_verh, cyttsp_fw_tts_verl,
+ ts->platform_data->use_force_fw_update);
+ printk(KERN_INFO "%s:"
+ "blappid=0x%02X%02X flappid=0x%02X%02X\n",
+ __func__,
+ ts->bl_data.appid_hi, ts->bl_data.appid_lo,
+ cyttsp_fw_app_idh, cyttsp_fw_app_idl);
+ printk(KERN_INFO "%s:"
+ "blappver=0x%02X%02X flappver=0x%02X%02X\n",
+ __func__,
+ ts->bl_data.appver_hi, ts->bl_data.appver_lo,
+ cyttsp_fw_app_verh, cyttsp_fw_app_verl);
+ printk(KERN_INFO "%s:"
+ "blcid=0x%02X%02X%02X flcid=0x%02X%02X%02X\n",
+ __func__,
+ ts->bl_data.cid_0, ts->bl_data.cid_1, ts->bl_data.cid_2,
+ cyttsp_fw_cid_0, cyttsp_fw_cid_1, cyttsp_fw_cid_2);
+
+ if (CY_DIFF(ts->bl_data.ttspver_hi, cyttsp_fw_tts_verh) ||
+ CY_DIFF(ts->bl_data.ttspver_lo, cyttsp_fw_tts_verl) ||
+ CY_DIFF(ts->bl_data.appid_hi, cyttsp_fw_app_idh) ||
+ CY_DIFF(ts->bl_data.appid_lo, cyttsp_fw_app_idl) ||
+ CY_DIFF(ts->bl_data.appver_hi, cyttsp_fw_app_verh) ||
+ CY_DIFF(ts->bl_data.appver_lo, cyttsp_fw_app_verl) ||
+ CY_DIFF(ts->bl_data.cid_0, cyttsp_fw_cid_0) ||
+ CY_DIFF(ts->bl_data.cid_1, cyttsp_fw_cid_1) ||
+ CY_DIFF(ts->bl_data.cid_2, cyttsp_fw_cid_2) ||
+ ts->platform_data->use_force_fw_update) {
+ /* load new app into TTSP Device */
+ cyttsp_setup_to_timer(ts);
+ ts->platform_data->power_state = CY_LDR_STATE;
+ retval = cyttsp_load_app(ts);
+ cyttsp_kill_to_timer(ts);
+
+ } else {
+ /* firmware file is a match with firmware in the TTSP device */
+ DBG(printk(KERN_INFO "%s: FW matches - no loader\n", __func__);)
+ }
+
+	return retval;
+}
+
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
new file mode 100755
index 00000000000..d4f7ffeed1b
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -0,0 +1,302 @@
+/* Source for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * drivers/input/touchscreen/cyttsp_spi.c
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/cyttsp.h>
+#include "cyttsp_core.h"
+
+#define DBG(x)
+
+#define CY_SPI_WR_OP 0x00 /* r/~w */
+#define CY_SPI_RD_OP 0x01
+#define CY_SPI_CMD_BYTES 4
+#define CY_SPI_SYNC_BYTE 2
+#define CY_SPI_SYNC_ACK1 0x62
+#define CY_SPI_SYNC_ACK2 0x9D
+#define CY_SPI_DATA_SIZE 128
+#define CY_SPI_DATA_BUF_SIZE (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE)
+#define CY_SPI_BITS_PER_WORD 8
+
+struct cyttsp_spi {
+ struct cyttsp_bus_ops ops;
+ struct spi_device *spi_client;
+ void *ttsp_client;
+ u8 wr_buf[CY_SPI_DATA_BUF_SIZE];
+ u8 rd_buf[CY_SPI_DATA_BUF_SIZE];
+};
+
+static int cyttsp_spi_xfer_(u8 op, struct cyttsp_spi *ts_spi,
+ u8 reg, u8 *buf, int length)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u8 *wr_buf = ts_spi->wr_buf;
+ u8 *rd_buf = ts_spi->rd_buf;
+ int retval;
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+ if (length > CY_SPI_DATA_SIZE) {
+ printk(KERN_ERR "%s: length %d is too big.\n",
+ __func__, length);
+ return -EINVAL;
+ }
+ DBG(printk(KERN_INFO "%s: OP=%s length=%d\n", __func__,
+ op == CY_SPI_RD_OP ? "Read" : "Write", length);)
+
+ memset(wr_buf, 0, CY_SPI_DATA_BUF_SIZE);
+ memset(rd_buf, 0, CY_SPI_DATA_BUF_SIZE);
+
+ wr_buf[0] = 0x00; /* header byte 0 */
+ wr_buf[1] = 0xFF; /* header byte 1 */
+ wr_buf[2] = reg; /* reg index */
+ wr_buf[3] = op; /* r/~w */
+ if (op == CY_SPI_WR_OP)
+ memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length);
+ DBG(
+ if (op == CY_SPI_RD_OP)
+ memset(rd_buf, CY_SPI_SYNC_NACK, CY_SPI_DATA_BUF_SIZE);)
+ DBG(
+ for (i = 0; i < (length + CY_SPI_CMD_BYTES); i++) {
+ if ((op == CY_SPI_RD_OP) && (i < CY_SPI_CMD_BYTES))
+ printk(KERN_INFO "%s: read op. wr[%d]:0x%02x\n",
+ __func__, i, wr_buf[i]);
+ if (op == CY_SPI_WR_OP)
+ printk(KERN_INFO "%s: write op. wr[%d]:0x%02x\n",
+ __func__, i, wr_buf[i]);
+ })
+
+ memset((void *)xfer, 0, sizeof(xfer));
+ spi_message_init(&msg);
+ xfer[0].tx_buf = wr_buf;
+ xfer[0].rx_buf = rd_buf;
+ if (op == CY_SPI_WR_OP) {
+ xfer[0].len = length + CY_SPI_CMD_BYTES;
+ spi_message_add_tail(&xfer[0], &msg);
+ } else if (op == CY_SPI_RD_OP) {
+ xfer[0].len = CY_SPI_CMD_BYTES;
+ spi_message_add_tail(&xfer[0], &msg);
+
+ xfer[1].rx_buf = buf;
+ xfer[1].len = length;
+ spi_message_add_tail(&xfer[1], &msg);
+ }
+
+ retval = spi_sync(ts_spi->spi_client, &msg);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: spi sync error %d, len=%d, op=%d\n",
+ __func__, retval, xfer[1].len, op);
+ retval = 0;
+ }
+
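+	/* an accepted transfer is acknowledged with two sync bytes */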
+ if ((rd_buf[CY_SPI_SYNC_BYTE] == CY_SPI_SYNC_ACK1) &&
+ (rd_buf[CY_SPI_SYNC_BYTE+1] == CY_SPI_SYNC_ACK2))
+ retval = 0;
+ else {
+ DBG(
+ for (i = 0; i < (CY_SPI_CMD_BYTES); i++)
+ printk(KERN_INFO "%s: test rd_buf[%d]:0x%02x\n",
+ __func__, i, rd_buf[i]);
+ for (i = 0; i < (length); i++)
+ printk(KERN_INFO "%s: test buf[%d]:0x%02x\n",
+ __func__, i, buf[i]);)
+ retval = 1;
+ }
+ return retval;
+}
+
+static int cyttsp_spi_xfer(u8 op, struct cyttsp_spi *ts,
+ u8 reg, u8 *buf, int length)
+{
+ int tries;
+ int retval;
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
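+	/* retry reads until the sync-ack bytes arrive; writes are sent once */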
+ if (op == CY_SPI_RD_OP) {
+ for (tries = CY_NUM_RETRY; tries; tries--) {
+ retval = cyttsp_spi_xfer_(op, ts, reg, buf, length);
+ if (retval == 0)
+ break;
+ else
+ msleep(10);
+ }
+ } else {
+ retval = cyttsp_spi_xfer_(op, ts, reg, buf, length);
+ }
+ return retval;
+}
+
+static s32 ttsp_spi_read_block_data(void *handle, u8 addr,
+ u8 length, void *data)
+{
+ int retval;
+ struct cyttsp_spi *ts = container_of(handle, struct cyttsp_spi, ops);
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ retval = cyttsp_spi_xfer(CY_SPI_RD_OP, ts, addr, data, length);
+ if (retval < 0)
+ printk(KERN_ERR "%s: ttsp_spi_read_block_data failed\n",
+ __func__);
+
+	/*
+	 * A positive return means the data sync bytes were not found.  This is
+	 * normal while the bootloader starts up, so it is not logged above;
+	 * signal a plain failure so the caller can retry.
+	 */
+ if (retval > 0)
+ retval = -1; /* now signal fail; so retry can be done */
+
+ return retval;
+}
+
+static s32 ttsp_spi_write_block_data(void *handle, u8 addr,
+ u8 length, const void *data)
+{
+ int retval;
+ struct cyttsp_spi *ts = container_of(handle, struct cyttsp_spi, ops);
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ retval = cyttsp_spi_xfer(CY_SPI_WR_OP, ts, addr, (void *)data, length);
+ if (retval < 0)
+ printk(KERN_ERR "%s: ttsp_spi_write_block_data failed\n",
+ __func__);
+
+	/*
+	 * A positive return means the data sync bytes were not found.  This is
+	 * normal while the bootloader starts up, so it is not logged above;
+	 * signal a plain failure so the caller can retry.
+	 */
+ if (retval > 0)
+ retval = -1; /* now signal fail; so retry can be done */
+
+ return retval;
+}
+
+static s32 ttsp_spi_tch_ext(void *handle, void *values)
+{
+ int retval = 0;
+ struct cyttsp_spi *ts = container_of(handle, struct cyttsp_spi, ops);
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ /* Add custom touch extension handling code here */
+ /* set: retval < 0 for any returned system errors,
+ retval = 0 if normal touch handling is required,
+ retval > 0 if normal touch handling is *not* required */
+ if (!ts || !values)
+ retval = -EIO;
+
+ return retval;
+}
+
+static int __devinit cyttsp_spi_probe(struct spi_device *spi)
+{
+ struct cyttsp_spi *ts_spi;
+ int retval;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* Set up SPI*/
+ spi->bits_per_word = CY_SPI_BITS_PER_WORD;
+ spi->mode = SPI_MODE_0;
+ retval = spi_setup(spi);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: SPI setup error %d\n", __func__, retval);
+ return retval;
+ }
+ ts_spi = kzalloc(sizeof(*ts_spi), GFP_KERNEL);
+ if (ts_spi == NULL) {
+ printk(KERN_ERR "%s: Error, kzalloc\n", __func__);
+ retval = -ENOMEM;
+ goto error_alloc_data_failed;
+ }
+ ts_spi->spi_client = spi;
+ dev_set_drvdata(&spi->dev, ts_spi);
+ ts_spi->ops.write = ttsp_spi_write_block_data;
+ ts_spi->ops.read = ttsp_spi_read_block_data;
+ ts_spi->ops.ext = ttsp_spi_tch_ext;
+
+ ts_spi->ttsp_client = cyttsp_core_init(&ts_spi->ops, &spi->dev);
+ if (!ts_spi->ttsp_client) {
+ retval = -ENODEV;
+ goto ttsp_core_err;
+ }
+ printk(KERN_INFO "%s: Successful registration %s\n",
+ __func__, CY_SPI_NAME);
+
+ return 0;
+
+ttsp_core_err:
+ kfree(ts_spi);
+error_alloc_data_failed:
+ return retval;
+}
+
+/* registered in driver struct */
+static int __devexit cyttsp_spi_remove(struct spi_device *spi)
+{
+ struct cyttsp_spi *ts_spi = dev_get_drvdata(&spi->dev);
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ cyttsp_core_release(ts_spi->ttsp_client);
+ kfree(ts_spi);
+ return 0;
+}
+
+
+static struct spi_driver cyttsp_spi_driver = {
+ .driver = {
+ .name = CY_SPI_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = cyttsp_spi_probe,
+ .remove = __devexit_p(cyttsp_spi_remove),
+};
+
+static int __init cyttsp_spi_init(void)
+{
+ int err;
+
+ err = spi_register_driver(&cyttsp_spi_driver);
+ printk(KERN_INFO "%s: Cypress TrueTouch(R) Standard Product SPI "
+ "Touchscreen Driver (Built %s @ %s) returned %d\n",
+ __func__, __DATE__, __TIME__, err);
+
+ return err;
+}
+module_init(cyttsp_spi_init);
+
+static void __exit cyttsp_spi_exit(void)
+{
+ spi_unregister_driver(&cyttsp_spi_driver);
+ printk(KERN_INFO "%s: module exit\n", __func__);
+}
+module_exit(cyttsp_spi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product SPI driver");
+MODULE_AUTHOR("Cypress");
+
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 00000000000..5729602cbb6
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,675 @@
+/* drivers/input/keyboard/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+ uint16_t addr;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int use_irq;
+ bool has_relative_report;
+ struct hrtimer timer;
+ struct work_struct work;
+ uint16_t max[2];
+ int snap_state[2][2];
+ int snap_down_on[2];
+ int snap_down_off[2];
+ int snap_up_on[2];
+ int snap_up_off[2];
+ int snap_down[2];
+ int snap_up[2];
+ uint32_t flags;
+ int reported_finger_count;
+ int8_t sensitivity_adjust;
+ int (*power)(int on);
+ struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
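+/*
+ * Program the panel: "No Clip Z" and sensitivity on register page 0x10, then
+ * normal operation at 80 reports per second on page 0x04.
+ */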
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_page_select_failed;
+ }
+ ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+ ts->sensitivity_adjust);
+ if (ret < 0)
+ pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+ return ret;
+}
+
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+ int i;
+ int ret;
+ int bad_data = 0;
+ struct i2c_msg msg[2];
+ uint8_t start_reg;
+ uint8_t buf[15];
+ struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+ int buf_len = ts->has_relative_report ? 15 : 13;
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &start_reg;
+ start_reg = 0x00;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = buf_len;
+ msg[1].buf = buf;
+
+ /* printk("synaptics_ts_work_func\n"); */
+ for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+ bad_data = 1;
+ } else {
+ /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+ /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+ /* buf[0], buf[1], buf[2], buf[3], */
+ /* buf[4], buf[5], buf[6], buf[7], */
+ /* buf[8], buf[9], buf[10], buf[11], */
+ /* buf[12], buf[13], buf[14], ret); */
+ if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+ printk(KERN_WARNING "synaptics_ts_work_func:"
+ " bad read %x %x %x %x %x %x %x %x %x"
+ " %x %x %x %x %x %x, ret %d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], ret);
+ if (bad_data)
+ synaptics_init_panel(ts);
+ bad_data = 1;
+ continue;
+ }
+ bad_data = 0;
+ if ((buf[buf_len - 1] & 1) == 0) {
+ /* printk("read %d coordinates\n", i); */
+ break;
+ } else {
+ int pos[2][2];
+ int f, a;
+ int base;
+ /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+ /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+ int z = buf[1];
+ int w = buf[0] >> 4;
+ int finger = buf[0] & 7;
+
+ /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+ /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+ /* int z2 = buf[1+6]; */
+ /* int w2 = buf[0+6] >> 4; */
+ /* int finger2 = buf[0+6] & 7; */
+
+ /* int dx = (int8_t)buf[12]; */
+ /* int dy = (int8_t)buf[13]; */
+ int finger2_pressed;
+
+ /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+ /* x, y, z, w, finger, */
+ /* x2, y2, z2, w2, finger2, */
+ /* dx, dy); */
+
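+				/*
+				 * Decode both finger positions, applying the
+				 * flip, snap-to-edge and swap-XY platform
+				 * flags.
+				 */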
+ base = 2;
+ for (f = 0; f < 2; f++) {
+ uint32_t flip_flag = SYNAPTICS_FLIP_X;
+ for (a = 0; a < 2; a++) {
+ int p = buf[base + 1];
+ p |= (uint16_t)(buf[base] & 0x1f) << 8;
+ if (ts->flags & flip_flag)
+ p = ts->max[a] - p;
+ if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
+ if (ts->snap_state[f][a]) {
+ if (p <= ts->snap_down_off[a])
+ p = ts->snap_down[a];
+ else if (p >= ts->snap_up_off[a])
+ p = ts->snap_up[a];
+ else
+ ts->snap_state[f][a] = 0;
+ } else {
+ if (p <= ts->snap_down_on[a]) {
+ p = ts->snap_down[a];
+ ts->snap_state[f][a] = 1;
+ } else if (p >= ts->snap_up_on[a]) {
+ p = ts->snap_up[a];
+ ts->snap_state[f][a] = 1;
+ }
+ }
+ }
+ pos[f][a] = p;
+ base += 2;
+ flip_flag <<= 1;
+ }
+ base += 2;
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(pos[f][0], pos[f][1]);
+ }
+ if (z) {
+ input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+ }
+ input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+ input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+ input_report_key(ts->input_dev, BTN_TOUCH, finger);
+ finger2_pressed = finger > 1 && finger != 7;
+ input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
+ }
+
+ if (!finger)
+ z = 0;
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+ input_mt_sync(ts->input_dev);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+ input_mt_sync(ts->input_dev);
+ } else if (ts->reported_finger_count > 1) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
+ }
+ ts->reported_finger_count = finger;
+ input_sync(ts->input_dev);
+ }
+ }
+ }
+ if (ts->use_irq)
+ enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+ struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+ /* printk("synaptics_ts_timer_func\n"); */
+
+ queue_work(synaptics_wq, &ts->work);
+
+ hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+ struct synaptics_ts_data *ts = dev_id;
+
+ /* printk("synaptics_ts_irq_handler\n"); */
+ disable_irq_nosync(ts->client->irq);
+ queue_work(synaptics_wq, &ts->work);
+ return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+ struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct synaptics_ts_data *ts;
+ uint8_t buf0[4];
+ uint8_t buf1[8];
+ struct i2c_msg msg[2];
+ int ret = 0;
+ uint16_t max_x, max_y;
+ int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+ struct synaptics_i2c_rmi_platform_data *pdata;
+ unsigned long irqflags;
+ int inactive_area_left;
+ int inactive_area_right;
+ int inactive_area_top;
+ int inactive_area_bottom;
+ int snap_left_on;
+ int snap_left_off;
+ int snap_right_on;
+ int snap_right_off;
+ int snap_top_on;
+ int snap_top_off;
+ int snap_bottom_on;
+ int snap_bottom_off;
+ uint32_t panel_version;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_check_functionality_failed;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_data_failed;
+ }
+ INIT_WORK(&ts->work, synaptics_ts_work_func);
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ pdata = client->dev.platform_data;
+ if (pdata)
+ ts->power = pdata->power;
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+ goto err_power_failed;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ /* fail? */
+ }
+ {
+ int retry = 10;
+ while (retry-- > 0) {
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+ }
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+ panel_version = ret << 8;
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+ panel_version |= ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+ if (pdata) {
+ while (pdata->version > panel_version)
+ pdata++;
+ ts->flags = pdata->flags;
+ ts->sensitivity_adjust = pdata->sensitivity_adjust;
+ irqflags = pdata->irqflags;
+ inactive_area_left = pdata->inactive_left;
+ inactive_area_right = pdata->inactive_right;
+ inactive_area_top = pdata->inactive_top;
+ inactive_area_bottom = pdata->inactive_bottom;
+ snap_left_on = pdata->snap_left_on;
+ snap_left_off = pdata->snap_left_off;
+ snap_right_on = pdata->snap_right_on;
+ snap_right_off = pdata->snap_right_off;
+ snap_top_on = pdata->snap_top_on;
+ snap_top_off = pdata->snap_top_off;
+ snap_bottom_on = pdata->snap_bottom_on;
+ snap_bottom_off = pdata->snap_bottom_off;
+ fuzz_x = pdata->fuzz_x;
+ fuzz_y = pdata->fuzz_y;
+ fuzz_p = pdata->fuzz_p;
+ fuzz_w = pdata->fuzz_w;
+ } else {
+ irqflags = 0;
+ inactive_area_left = 0;
+ inactive_area_right = 0;
+ inactive_area_top = 0;
+ inactive_area_bottom = 0;
+ snap_left_on = 0;
+ snap_left_off = 0;
+ snap_right_on = 0;
+ snap_right_off = 0;
+ snap_top_on = 0;
+ snap_top_off = 0;
+ snap_bottom_on = 0;
+ snap_bottom_off = 0;
+ fuzz_x = 0;
+ fuzz_y = 0;
+ fuzz_p = 0;
+ fuzz_w = 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ goto err_detect_failed;
+ }
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf0;
+ buf0[0] = 0xe0;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 8;
+ msg[1].buf = buf1;
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_transfer failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+ buf1[0], buf1[1], buf1[2], buf1[3],
+ buf1[4], buf1[5], buf1[6], buf1[7]);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_detect_failed;
+ }
+ ret = i2c_smbus_read_word_data(ts->client, 0x02);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->has_relative_report = !(ret & 0x100);
+ printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+ ret = i2c_smbus_read_word_data(ts->client, 0x04);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ ret = i2c_smbus_read_word_data(ts->client, 0x06);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(max_x, max_y);
+
+ ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_init_panel failed\n");
+ goto err_detect_failed;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ ts->input_dev->name = "synaptics-rmi-touchscreen";
+ set_bit(EV_SYN, ts->input_dev->evbit);
+ set_bit(EV_KEY, ts->input_dev->evbit);
+ set_bit(BTN_TOUCH, ts->input_dev->keybit);
+ set_bit(BTN_2, ts->input_dev->keybit);
+ set_bit(EV_ABS, ts->input_dev->evbit);
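+	/*
+	 * The inactive-area, snap and fuzz platform values are fractions of
+	 * 0x10000; scale them to panel coordinates.
+	 */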
+ inactive_area_left = inactive_area_left * max_x / 0x10000;
+ inactive_area_right = inactive_area_right * max_x / 0x10000;
+ inactive_area_top = inactive_area_top * max_y / 0x10000;
+ inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+ snap_left_on = snap_left_on * max_x / 0x10000;
+ snap_left_off = snap_left_off * max_x / 0x10000;
+ snap_right_on = snap_right_on * max_x / 0x10000;
+ snap_right_off = snap_right_off * max_x / 0x10000;
+ snap_top_on = snap_top_on * max_y / 0x10000;
+ snap_top_off = snap_top_off * max_y / 0x10000;
+ snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+ snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+ fuzz_x = fuzz_x * max_x / 0x10000;
+ fuzz_y = fuzz_y * max_y / 0x10000;
+ ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+ ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+ ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+ ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+ ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+ ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+ ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+ ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+ ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+ ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+ ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+ ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+ printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+ printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+ inactive_area_left, inactive_area_right,
+ inactive_area_top, inactive_area_bottom);
+ printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+ snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+ snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+ input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+ /* ts->input_dev->name = ts->keypad_info->name; */
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+ goto err_input_register_device_failed;
+ }
+ if (client->irq) {
+ ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+ if (ret == 0) {
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+ if (ret)
+ free_irq(client->irq, ts);
+ }
+ if (ret == 0)
+ ts->use_irq = 1;
+ else
+ dev_err(&client->dev, "request_irq failed\n");
+ }
+ if (!ts->use_irq) {
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ts->timer.function = synaptics_ts_timer_func;
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = synaptics_ts_early_suspend;
+ ts->early_suspend.resume = synaptics_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
+ printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+ return 0;
+
+err_input_register_device_failed:
+ input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+ kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+ return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+ unregister_early_suspend(&ts->early_suspend);
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+ return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->use_irq)
+ disable_irq(client->irq);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = cancel_work_sync(&ts->work);
+ if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+ enable_irq(client->irq);
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+ ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+ if (ts->power) {
+ ret = ts->power(0);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power off failed\n");
+ }
+ return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+ }
+
+ synaptics_init_panel(ts);
+
+ if (ts->use_irq)
+ enable_irq(client->irq);
+
+ if (!ts->use_irq)
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ else
+ i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+ { SYNAPTICS_I2C_RMI_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+ .probe = synaptics_ts_probe,
+ .remove = synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = synaptics_ts_suspend,
+ .resume = synaptics_ts_resume,
+#endif
+ .id_table = synaptics_ts_id,
+ .driver = {
+ .name = SYNAPTICS_I2C_RMI_NAME,
+ },
+};
+
+static int __init synaptics_ts_init(void)
+{
+ synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+ if (!synaptics_wq)
+ return -ENOMEM;
+ return i2c_add_driver(&synaptics_ts_driver);
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+ i2c_del_driver(&synaptics_ts_driver);
+ if (synaptics_wq)
+ destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff203a42186..a09df711541 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,14 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_AB5500
+ tristate "HVLED driver for AB5500"
+ depends on AB5500_CORE
+ help
+	  This option enables support for the HVLED in the AB5500
+	  multi-function device. Currently the LEDs on the AB5500 v1.0
+	  chip are supported.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e4f6bf56888..5f28e852188 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_AB5500) += leds-ab5500.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
diff --git a/drivers/leds/leds-ab5500.c b/drivers/leds/leds-ab5500.c
new file mode 100644
index 00000000000..294551b1962
--- /dev/null
+++ b/drivers/leds/leds-ab5500.c
@@ -0,0 +1,811 @@
+/*
+ * leds-ab5500.c - driver for High Voltage (HV) LED in ST-Ericsson AB5500 chip
+ *
+ * Copyright (C) 2011 ST-Ericsson SA.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ */
+
+/*
+ * Driver for HVLED in ST-Ericsson AB5500 analog baseband controller
+ *
+ * This chip can drive up to 3 LEDs with up to 40 mA of LED sink current.
+ * These LEDs can be programmed to blink between two intensities with a
+ * fading delay of half, one or two seconds.
+ *
+ * The LEDs can be controlled via sysfs entries in
+ * "/sys/class/leds/< red | green | blue >"
+ *
+ * For each led,
+ *
+ * Modes of operation:
+ * - manual: echo 0 > fade_auto (default, no auto blinking)
+ * - auto: echo 1 > fade_auto
+ *
+ * Soft scaling delay between two intensities:
+ * - 1/2 sec: echo 1 > fade_delay
+ * - 1 sec: echo 2 > fade_delay
+ * - 2 sec: echo 3 > fade_delay
+ *
+ * Possible sequence of operation:
+ * - continuous glow: set brightness (brt)
+ * - blink between LED_OFF and LED_FULL:
+ * set fade delay -> set fade auto
+ * - blink between the previous two brightness values (only for LED-1):
+ * set brt1 -> set brt2 -> set fade auto
+ *
+ * The delay can be set at any step; its effect is seen when switching modes.
+ *
+ * Note: Blink/Fade feature is supported in AB5500 v2 onwards
+ *
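+ * Example, blink the "red" LED between LED_OFF and LED_FULL with a
+ * one second fade (the LED names come from platform data):
+ *   echo 2 > /sys/class/leds/red/fade_delay
+ *   echo 1 > /sys/class/leds/red/fade_auto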
+ */
+
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/leds-ab5500.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#define AB5500LED_NAME "ab5500-leds"
+#define AB5500_LED_MAX 0x03
+
+/* Register offsets */
+#define AB5500_LED_REG_ENABLE 0x03
+#define AB5500_LED_FADE_CTRL 0x0D
+
+/* LED-0 Register Addr. Offsets */
+#define AB5500_LED0_PWM_DUTY 0x01
+#define AB5500_LED0_PWMFREQ 0x02
+#define AB5500_LED0_SINKCTL 0x0A
+#define AB5500_LED0_FADE_HI 0x11
+#define AB5500_LED0_FADE_LO 0x17
+
+/* LED-1 Register Addr. Offsets */
+#define AB5500_LED1_PWM_DUTY 0x05
+#define AB5500_LED1_PWMFREQ 0x06
+#define AB5500_LED1_SINKCTL 0x0B
+#define AB5500_LED1_FADE_HI 0x13
+#define AB5500_LED1_FADE_LO 0x19
+
+/* LED-2 Register Addr. Offsets */
+#define AB5500_LED2_PWM_DUTY 0x08
+#define AB5500_LED2_PWMFREQ 0x09
+#define AB5500_LED2_SINKCTL 0x0C
+#define AB5500_LED2_FADE_HI 0x15
+#define AB5500_LED2_FADE_LO 0x1B
+
+/* led-0/1/2 enable bit */
+#define AB5500_LED_ENABLE_MASK 0x04
+
+/* led intensity */
+#define AB5500_LED_INTENSITY_OFF 0x0
+#define AB5500_LED_INTENSITY_MAX 0x3FF
+#define AB5500_LED_INTENSITY_STEP (AB5500_LED_INTENSITY_MAX/LED_FULL)
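+/* 0x3FF / LED_FULL is 4 with integer division, so LED_FULL maps to a duty of 1020 */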
+
+/* pwm frequency */
+#define AB5500_LED_PWMFREQ_MAX 0x0F /* 373.39 @sysclk=26MHz */
+#define AB5500_LED_PWMFREQ_SHIFT 4
+
+/* LED sink current control */
+#define AB5500_LED_SINKCURR_MAX 0x0F /* 40mA MAX */
+#define AB5500_LED_SINKCURR_SHIFT 4
+
+/* fade Control shift and masks */
+#define AB5500_FADE_DELAY_SHIFT 0x00
+#define AB5500_FADE_MODE_MASK 0x80
+#define AB5500_FADE_DELAY_MASK 0x03
+#define AB5500_FADE_START_MASK 0x04
+#define AB5500_FADE_ON_MASK 0x70
+#define AB5500_LED_FADE_ENABLE(ledid) (0x40 >> (ledid))
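+/* the per-LED fade enable bits are 0x40, 0x20 and 0x10 for LED 0, 1 and 2 */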
+
+struct ab5500_led {
+ u8 id;
+ u8 max_current;
+ u16 brt_val;
+ u16 fade_hi;
+ u16 fade_lo;
+ bool led_on;
+ struct led_classdev led_cdev;
+ struct work_struct led_work;
+};
+
+struct ab5500_hvleds {
+ struct mutex lock;
+ struct device *dev;
+ struct ab5500_hvleds_platform_data *pdata;
+ struct ab5500_led leds[AB5500_HVLEDS_MAX];
+ bool hw_fade;
+ bool fade_auto;
+ enum ab5500_fade_delay fade_delay;
+};
+
+static u8 ab5500_led_pwmduty_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_PWM_DUTY,
+ AB5500_LED1_PWM_DUTY,
+ AB5500_LED2_PWM_DUTY,
+};
+
+static u8 ab5500_led_pwmfreq_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_PWMFREQ,
+ AB5500_LED1_PWMFREQ,
+ AB5500_LED2_PWMFREQ,
+};
+
+static u8 ab5500_led_sinkctl_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_SINKCTL,
+ AB5500_LED1_SINKCTL,
+ AB5500_LED2_SINKCTL
+};
+
+static u8 ab5500_led_fade_hi_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_FADE_HI,
+ AB5500_LED1_FADE_HI,
+ AB5500_LED2_FADE_HI,
+};
+
+static u8 ab5500_led_fade_lo_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_FADE_LO,
+ AB5500_LED1_FADE_LO,
+ AB5500_LED2_FADE_LO,
+};
+
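+/* _x must name a struct ab5500_led member (led_cdev or led_work) */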
+#define to_led(_x) container_of(_x, struct ab5500_led, _x)
+
+static inline struct ab5500_hvleds *led_to_hvleds(struct ab5500_led *led)
+{
+ return container_of(led, struct ab5500_hvleds, leds[led->id]);
+}
+
+static int ab5500_led_enable(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id],
+ AB5500_LED_ENABLE_MASK,
+ AB5500_LED_ENABLE_MASK);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ return ret;
+
+}
+
+static int ab5500_led_start_manual(struct ab5500_hvleds *hvleds)
+{
+ int ret;
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_mask_and_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, AB5500_FADE_START_MASK,
+ AB5500_FADE_START_MASK);
+ if (ret < 0)
+ dev_err(hvleds->dev, "update reg 0x%x failed - %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_disable(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id] - 1, 0);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id], 0);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ return ret;
+}
+
+static int ab5500_led_pwmduty_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u16 val)
+{
+ int ret;
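+	/*
+	 * The 10-bit duty is split over two registers: bits 7:0 are
+	 * written to ab5500_led_pwmduty_reg[led_id] - 1 and bits 9:8 to
+	 * ab5500_led_pwmduty_reg[led_id].
+	 */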
+ u8 val_lsb = val & 0xFF;
+ u8 val_msb = (val & 0x300) >> 8;
+
+ mutex_lock(&hvleds->lock);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n"
+ "reg[%d] w val = %d\n",
+ ab5500_led_pwmduty_reg[led_id] - 1, val_lsb,
+ ab5500_led_pwmduty_reg[led_id], val_msb);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id] - 1, val_lsb);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id], val_msb);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_pwmfreq_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u8 val)
+{
+ int ret;
+
+ val = (val & 0x0F) << AB5500_LED_PWMFREQ_SHIFT;
+
+ mutex_lock(&hvleds->lock);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n",
+ ab5500_led_pwmfreq_reg[led_id], val);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmfreq_reg[led_id], val);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmfreq_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_sinkctl_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u8 val)
+{
+ int ret;
+
+ if (val > AB5500_LED_SINKCURR_MAX)
+ val = AB5500_LED_SINKCURR_MAX;
+
+ val = (val << AB5500_LED_SINKCURR_SHIFT);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n",
+ ab5500_led_sinkctl_reg[led_id], val);
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_sinkctl_reg[led_id], val);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_sinkctl_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_fade_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, bool on, u16 val)
+{
+ int ret;
+ int val_lsb = val & 0xFF;
+ int val_msb = (val & 0x300) >> 8;
+ u8 *fade_reg;
+
+ if (on)
+ fade_reg = ab5500_led_fade_hi_reg;
+ else
+ fade_reg = ab5500_led_fade_lo_reg;
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n"
+ "reg[%d] w val = %d\n",
+ fade_reg[led_id] - 1, val_lsb,
+ fade_reg[led_id], val_msb);
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ fade_reg[led_id] - 1, val_lsb);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ fade_reg[led_id], val_msb);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ fade_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_sinkctl_read(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_get_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_sinkctl_reg[led_id], &val);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] r failed: %d\n",
+ ab5500_led_sinkctl_reg[led_id], ret);
+ mutex_unlock(&hvleds->lock);
+ return ret;
+ }
+
+ val = (val & 0xF0) >> AB5500_LED_SINKCURR_SHIFT;
+
+ mutex_unlock(&hvleds->lock);
+
+ return val;
+}
+
+static void ab5500_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct ab5500_led *led = to_led(led_cdev);
+
+	/* mask to the 0-255 classdev range and scale to the 10-bit PWM duty */
+ brt_val &= LED_FULL;
+ led->brt_val = brt_val * AB5500_LED_INTENSITY_STEP;
+
+ schedule_work(&led->led_work);
+}
+
+static void ab5500_led_work(struct work_struct *led_work)
+{
+ struct ab5500_led *led = to_led(led_work);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (led->led_on == true) {
+ ab5500_led_pwmduty_write(hvleds, led->id, led->brt_val);
+ if (hvleds->hw_fade && led->brt_val) {
+ ab5500_led_enable(hvleds, led->id);
+ ab5500_led_start_manual(hvleds);
+ }
+ }
+}
+
+static ssize_t ab5500_led_show_current(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int led_curr = 0;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ led_curr = ab5500_led_sinkctl_read(hvleds, led->id);
+
+ if (led_curr < 0)
+ return led_curr;
+
+ return sprintf(buf, "%d\n", led_curr);
+}
+
+static ssize_t ab5500_led_store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+ unsigned long led_curr;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &led_curr))
+ return -EINVAL;
+
+ if (led_curr > led->max_current)
+ led_curr = led->max_current;
+
+ ret = ab5500_led_sinkctl_write(hvleds, led->id, led_curr);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t ab5500_led_store_fade_auto(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+ u8 fade_ctrl = 0;
+ unsigned long fade_auto;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &fade_auto))
+ return -EINVAL;
+
+ if (fade_auto > 1) {
+ dev_err(hvleds->dev, "invalid mode\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_get_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, &fade_ctrl);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+ goto unlock_and_return;
+ }
+
+ /* manual mode */
+ if (fade_auto == false) {
+ fade_ctrl &= ~(AB5500_LED_FADE_ENABLE(led->id));
+ if (!(fade_ctrl & AB5500_FADE_ON_MASK))
+ fade_ctrl = 0;
+
+ ret = ab5500_led_disable(hvleds, led->id);
+ if (ret < 0)
+ goto unlock_and_return;
+ } else {
+ /* set led auto enable bit */
+ fade_ctrl |= AB5500_FADE_MODE_MASK;
+ fade_ctrl |= AB5500_LED_FADE_ENABLE(led->id);
+
+ /* set fade delay */
+ fade_ctrl &= ~AB5500_FADE_DELAY_MASK;
+ fade_ctrl |= hvleds->fade_delay << AB5500_FADE_DELAY_SHIFT;
+
+ /* set fade start manual */
+ fade_ctrl |= AB5500_FADE_START_MASK;
+
+		/* enable the corresponding LED */
+ ret = ab5500_led_enable(hvleds, led->id);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ }
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, fade_ctrl);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+ goto unlock_and_return;
+ }
+
+ hvleds->fade_auto = fade_auto;
+
+ ret = len;
+
+unlock_and_return:
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static ssize_t ab5500_led_show_fade_auto(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ return sprintf(buf, "%d\n", hvleds->fade_auto);
+}
+
+static ssize_t ab5500_led_store_fade_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ unsigned long fade_delay;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &fade_delay))
+ return -EINVAL;
+
+ if (fade_delay > AB5500_FADE_DELAY_TWOSEC) {
+ dev_err(hvleds->dev, "invalid mode\n");
+ return -EINVAL;
+ }
+
+ hvleds->fade_delay = fade_delay;
+
+ return len;
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO,
+ ab5500_led_show_current, ab5500_led_store_current);
+static DEVICE_ATTR(fade_auto, S_IRUGO | S_IWUGO,
+ ab5500_led_show_fade_auto, ab5500_led_store_fade_auto);
+static DEVICE_ATTR(fade_delay, S_IRUGO | S_IWUGO,
+ NULL, ab5500_led_store_fade_delay);
+
+static int ab5500_led_init_registers(struct ab5500_hvleds *hvleds)
+{
+ int ret = 0;
+ unsigned int led_id;
+
+ /* fade - manual : dur mid : pwm duty mid */
+ if (!hvleds->hw_fade) {
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_REG_ENABLE, true);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_REG_ENABLE, ret);
+ return ret;
+ }
+ }
+
+ for (led_id = 0; led_id < AB5500_HVLEDS_MAX; led_id++) {
+ if (hvleds->leds[led_id].led_on == false)
+ continue;
+
+ ret = ab5500_led_sinkctl_write(
+ hvleds, led_id,
+ hvleds->leds[led_id].max_current);
+ if (ret < 0)
+ return ret;
+
+ if (hvleds->hw_fade) {
+ ret = ab5500_led_pwmfreq_write(
+ hvleds, led_id,
+ AB5500_LED_PWMFREQ_MAX / 2);
+ if (ret < 0)
+ return ret;
+
+ /* fade high intensity */
+ ret = ab5500_led_fade_write(
+ hvleds, led_id, true,
+ hvleds->leds[led_id].fade_hi);
+ if (ret < 0)
+ return ret;
+
+ /* fade low intensity */
+ ret = ab5500_led_fade_write(
+ hvleds, led_id, false,
+ hvleds->leds[led_id].fade_lo);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* init led off */
+ ret |= ab5500_led_pwmduty_write(
+ hvleds, led_id, AB5500_LED_INTENSITY_OFF);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ab5500_led_register_leds(struct device *dev,
+ struct ab5500_hvleds_platform_data *pdata,
+ struct ab5500_hvleds *hvleds)
+{
+ int i_led;
+ int ret = 0;
+ struct ab5500_led_conf *pled;
+ struct ab5500_led *led;
+
+ hvleds->dev = dev;
+ hvleds->pdata = pdata;
+
+ if (abx500_get_chip_id(dev) == AB5500_2_0)
+ hvleds->hw_fade = true;
+ else
+ hvleds->hw_fade = false;
+
+ for (i_led = 0; i_led < AB5500_HVLEDS_MAX; i_led++) {
+ pled = &pdata->leds[i_led];
+ led = &hvleds->leds[i_led];
+
+ INIT_WORK(&led->led_work, ab5500_led_work);
+
+ led->id = pled->led_id;
+ led->max_current = pled->max_current;
+ led->led_on = pled->led_on;
+ led->led_cdev.name = pled->name;
+ led->led_cdev.brightness_set = ab5500_led_brightness_set;
+
+ /* Provide interface only for enabled LEDs */
+ if (led->led_on == false)
+ continue;
+
+ if (hvleds->hw_fade) {
+ led->fade_hi = (pled->fade_hi & LED_FULL);
+ led->fade_hi *= AB5500_LED_INTENSITY_STEP;
+ led->fade_lo = (pled->fade_lo & LED_FULL);
+ led->fade_lo *= AB5500_LED_INTENSITY_STEP;
+ }
+
+ ret = led_classdev_register(dev, &led->led_cdev);
+ if (ret < 0) {
+ dev_err(dev, "Register led class failed: %d\n", ret);
+ goto bailout1;
+ }
+
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_led_current);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device creation failed: %d\n", ret);
+ goto bailout2;
+ }
+
+ if (hvleds->hw_fade) {
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device "
+ "creation failed: %d\n", ret);
+ goto bailout3;
+ }
+
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device "
+ "creation failed: %d\n", ret);
+ goto bailout4;
+ }
+ }
+ }
+
+ return ret;
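+	/*
+	 * Error unwind: the bailout labels below are jumped to from the
+	 * registration loop above; execution falls through the remaining
+	 * cleanup steps for the current LED and the loop then releases
+	 * every previously registered LED.
+	 */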
+ for (; i_led >= 0; i_led--) {
+ if (hvleds->leds[i_led].led_on == false)
+ continue;
+
+ if (hvleds->hw_fade) {
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_fade_delay);
+bailout4:
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_fade_auto);
+ }
+bailout3:
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_led_current);
+bailout2:
+ led_classdev_unregister(&hvleds->leds[i_led].led_cdev);
+bailout1:
+ cancel_work_sync(&hvleds->leds[i_led].led_work);
+ }
+ return ret;
+}
+
+static int __devinit ab5500_hvleds_probe(struct platform_device *pdev)
+{
+ struct ab5500_hvleds_platform_data *pdata = pdev->dev.platform_data;
+ struct ab5500_hvleds *hvleds = NULL;
+ int ret = 0, i;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data required\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ hvleds = kzalloc(sizeof(struct ab5500_hvleds), GFP_KERNEL);
+ if (hvleds == NULL) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ mutex_init(&hvleds->lock);
+
+ /* init leds data and register led_classdev */
+ ret = ab5500_led_register_leds(&pdev->dev, pdata, hvleds);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "leds registration failed\n");
+ goto err_out;
+ }
+
+ /* init device registers and set initial led current */
+ ret = ab5500_led_init_registers(hvleds);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "reg init failed: %d\n", ret);
+ goto err_reg_init;
+ }
+
+ if (hvleds->hw_fade)
+ dev_info(&pdev->dev, "v2 enabled\n");
+ else
+ dev_info(&pdev->dev, "v1 enabled\n");
+
+	/* remove() retrieves the LEDs through the driver data */
+	platform_set_drvdata(pdev, hvleds);
+
+	return ret;
+
+err_reg_init:
+ for (i = 0; i < AB5500_HVLEDS_MAX; i++) {
+ struct ab5500_led *led = &hvleds->leds[i];
+
+ if (led->led_on == false)
+ continue;
+
+ device_remove_file(led->led_cdev.dev, &dev_attr_led_current);
+ if (hvleds->hw_fade) {
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ }
+ led_classdev_unregister(&led->led_cdev);
+ cancel_work_sync(&led->led_work);
+ }
+err_out:
+ kfree(hvleds);
+ return ret;
+}
+
+static int __devexit ab5500_hvleds_remove(struct platform_device *pdev)
+{
+ struct ab5500_hvleds *hvleds = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < AB5500_HVLEDS_MAX; i++) {
+ struct ab5500_led *led = &hvleds->leds[i];
+
+ if (led->led_on == false)
+ continue;
+
+ device_remove_file(led->led_cdev.dev, &dev_attr_led_current);
+ if (hvleds->hw_fade) {
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ }
+ led_classdev_unregister(&led->led_cdev);
+ cancel_work_sync(&led->led_work);
+ }
+ kfree(hvleds);
+ return 0;
+}
+
+static struct platform_driver ab5500_hvleds_driver = {
+ .driver = {
+ .name = AB5500LED_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_hvleds_probe,
+ .remove = __devexit_p(ab5500_hvleds_remove),
+};
+
+static int __init ab5500_hvleds_module_init(void)
+{
+ return platform_driver_register(&ab5500_hvleds_driver);
+}
+
+static void __exit ab5500_hvleds_module_exit(void)
+{
+ platform_driver_unregister(&ab5500_hvleds_driver);
+}
+
+module_init(ab5500_hvleds_module_init);
+module_exit(ab5500_hvleds_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
+MODULE_DESCRIPTION("Driver for AB5500 HVLED");
+
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 666daf77872..775cd67c604 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,6 +27,7 @@ struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
unsigned int active_low;
+ unsigned int lth_brightness;
unsigned int period;
};
@@ -42,7 +43,10 @@ static void led_pwm_set(struct led_classdev *led_cdev,
pwm_config(led_dat->pwm, 0, period);
pwm_disable(led_dat->pwm);
} else {
- pwm_config(led_dat->pwm, brightness * period / max, period);
+ brightness = led_dat->lth_brightness + (brightness *
+ (led_dat->period - led_dat->lth_brightness) / max);
+ pwm_config(led_dat->pwm, brightness, led_dat->period);
+
pwm_enable(led_dat->pwm);
}
}
@@ -79,6 +83,8 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->cdev.default_trigger = cur_led->default_trigger;
led_dat->active_low = cur_led->active_low;
led_dat->period = cur_led->pwm_period_ns;
+ led_dat->lth_brightness = cur_led->lth_brightness *
+ (cur_led->pwm_period_ns / cur_led->max_brightness);
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.max_brightness = cur_led->max_brightness;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f1391c21ef2..42e3b122c6a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -289,6 +289,17 @@ config MFD_TC3589X
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_TC35892
+ bool "Support Toshiba TC35892"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the Toshiba TC35892 I/O Expander.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TMIO
bool
default n
@@ -317,6 +328,27 @@ config MFD_TC6393XB
help
Support for Toshiba Mobile IO Controller TC6393XB
+config AB5500_CORE
+ bool "ST-Ericsson AB5500 Mixed Signal Circuit core functions"
+ select MFD_CORE
+ depends on GENERIC_HARDIRQS && ABX500_CORE
+ help
+ Select this to enable the AB5500 Mixed Signal IC core
+	  functionality. This connects to an AB5500 chip on the I2C bus via
+ the Power and Reset Management Unit (PRCMU). It exposes a number
+ of symbols needed for dependent devices to read and write
+ registers and subscribe to events from this multi-functional IC.
+ This is needed to use other features of the AB5500 such as
+ battery-backed RTC, charging control, Regulators, LEDs, vibrator,
+ system power and temperature, power management and ALSA sound.
+
+config AB5500_GPADC
+ bool "AB5500 GPADC driver"
+ depends on AB5500_CORE
+ default y
+ help
+ AB5500 GPADC driver used to convert battery/usb voltage.
+
config PMIC_DA903X
bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
depends on I2C=y
@@ -596,7 +628,7 @@ config AB8500_CORE
config AB8500_I2C_CORE
bool "AB8500 register access via PRCMU I2C"
- depends on AB8500_CORE && MFD_DB8500_PRCMU
+ depends on AB8500_CORE
default y
help
This enables register access to the AB8500 chip via PRCMU I2C.
@@ -604,6 +636,14 @@ config AB8500_I2C_CORE
the I2C bus is connected to the Power Reset
and Mangagement Unit, PRCMU.
+config AB8500_DENC
+ bool "AB8500_DENC driver support(CVBS)"
+ depends on AB8500_CORE
+ help
+ Select this option to add driver support for analog TV out through
+ AB8500.
+
config AB8500_DEBUG
bool "Enable debug info via debugfs"
depends on AB8500_CORE && DEBUG_FS
@@ -614,10 +654,10 @@ config AB8500_DEBUG
config AB8500_GPADC
bool "AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
+ depends on AB8500_CORE
default y
help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
+ AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage.
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2292eb7524..9bd04bf0ed7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -2,6 +2,7 @@
# Makefile for multifunction miscellaneous devices
#
+obj-$(CONFIG_AB5500_CORE) += ab5500-core.o ab5500-power.o
88pm860x-objs := 88pm860x-core.o 88pm860x-i2c.o
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
+obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -83,11 +85,13 @@ obj-$(CONFIG_AB5500_CORE) += ab5500-core.o
obj-$(CONFIG_AB5500_DEBUG) += ab5500-debugfs.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
+obj-$(CONFIG_AB8500_DENC) += ab8500-denc.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o
+obj-$(CONFIG_AB5500_GPADC) += ab5500-gpadc.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index ec10629a0b0..c0f36e2389c 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -22,8 +22,8 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/random.h>
-#include <linux/mfd/ab5500/ab5500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
@@ -388,7 +388,7 @@ static struct ab5500_i2c_banks ab5500_bank_ranges[AB5500_NUM_DEVICES] = {
},
},
},
- [AB5500_DEVID_FUELGAUGE] = {
+ [AB5500_DEVID_FG] = {
.nbanks = 1,
.bank = (struct ab5500_i2c_ranges []) {
{
@@ -594,9 +594,9 @@ static struct mfd_cell ab5500_devs[AB5500_NUM_DEVICES] = {
},
},
},
- [AB5500_DEVID_FUELGAUGE] = {
+ [AB5500_DEVID_FG] = {
.name = "ab5500-fuelgauge",
- .id = AB5500_DEVID_FUELGAUGE,
+ .id = AB5500_DEVID_FG,
.num_resources = 6,
.resources = (struct resource[]) {
{
@@ -992,6 +992,74 @@ static struct mfd_cell ab5500_devs[AB5500_NUM_DEVICES] = {
},
},
},
+ [AB5500_DEVID_TEMPMON] = {
+ .name = "abx500-temp",
+ .id = AB5500_DEVID_TEMPMON,
+ .num_resources = 1,
+ .resources = (struct resource[]) {
+ {
+ .name = "ABX500_TEMP_WARM",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 2),
+ .end = AB5500_IRQ(2, 2),
+ },
+ },
+ },
+ [AB5500_DEVID_ACCDET] = {
+ .name = "ab5500-acc-det",
+ .id = AB5500_DEVID_ACCDET,
+ .num_resources = 8,
+ .resources = (struct resource[]) {
+ {
+ .name = "acc_detedt22db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 7),
+ .end = AB5500_IRQ(2, 7),
+ },
+ {
+ .name = "acc_detedt21db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 6),
+ .end = AB5500_IRQ(2, 6),
+ },
+ {
+ .name = "acc_detedt21db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 5),
+ .end = AB5500_IRQ(2, 5),
+ },
+ {
+ .name = "acc_detedt3db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 4),
+ .end = AB5500_IRQ(3, 4),
+ },
+ {
+ .name = "acc_detedt3db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 3),
+ .end = AB5500_IRQ(3, 3),
+ },
+ {
+ .name = "acc_detedt1db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 2),
+ .end = AB5500_IRQ(3, 2),
+ },
+ {
+ .name = "acc_detedt1db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 1),
+ .end = AB5500_IRQ(3, 1),
+ },
+ {
+ .name = "acc_detedt22db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 0),
+ .end = AB5500_IRQ(3, 0),
+ },
+ },
+ },
};
/*
@@ -1302,6 +1370,10 @@ static const struct ab_family_id ids[] __initdata = {
.id = AB5500_1_1,
.name = "1.1"
},
+ {
+ .id = AB5500_2_0,
+ .name = "2.0"
+ },
/* Terminator */
{
.id = 0x00,
diff --git a/drivers/mfd/ab5500-gpadc.c b/drivers/mfd/ab5500-gpadc.c
new file mode 100644
index 00000000000..6756d3cf37a
--- /dev/null
+++ b/drivers/mfd/ab5500-gpadc.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Vijaya Kumar K <vijay.kilari@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+
+/*
+ * Manual mode ADC registers
+ */
+#define AB5500_GPADC_MANUAL_STAT_REG 0x1F
+#define AB5500_GPADC_MANDATAL_REG 0x21
+#define AB5500_GPADC_MANDATAH_REG 0x20
+#define AB5500_GPADC_MANUAL_MUX_CTRL 0x22
+#define AB5500_GPADC_MANUAL_MODE_CTRL 0x23
+#define AB5500_GPADC_MANUAL_MODE_CTRL2 0x24
+/*
+ * Auto/Polling mode ADC registers
+ */
+#define AB5500_GPADC_AUTO_VBAT_MAX 0x26
+#define AB5500_GPADC_AUTO_VBAT_MIN_TXON 0x27
+#define AB5500_GPADC_AUTO_VBAT_MIN_NOTX 0x28
+#define AB5500_GPADC_AUTO_VBAT_AVGH 0x29
+#define AB5500_GPADC_AUTO_VBAT_AVGL 0x2A
+#define AB5500_GPADC_AUTO_ICHAR_MAX 0x2B
+#define AB5500_GPADC_AUTO_ICHAR_MIN 0x2C
+#define AB5500_GPADC_AUTO_ICHAR_AVG 0x2D
+#define AB5500_GPADC_AUTO_CTRL2 0x2F
+#define AB5500_GPADC_AUTO_CTRL1 0x30
+#define AB5500_GPADC_AUTO_PWR_CTRL 0x31
+#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON 0x32
+#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX 0x33
+#define AB5500_GPADC_AUTO_TRIG_ADOUT0_CTRL 0x34
+#define AB5500_GPADC_AUTO_TRIG_ADOUT1_CTRL 0x35
+#define AB5500_GPADC_AUTO_TRIG0_MUX_CTRL 0x37
+#define AB5500_GPADC_AUTO_XTALTEMP_CTRL 0x57
+#define AB5500_GPADC_KELVIN_CTRL 0xFE
+
+/* gpadc constants */
+#define AB5500_INT_ADC_TRIG0 0x0
+#define AB5500_INT_ADC_TRIG1 0x1
+#define AB5500_INT_ADC_TRIG2 0x2
+#define AB5500_INT_ADC_TRIG3 0x3
+#define AB5500_INT_ADC_TRIG4 0x4
+#define AB5500_INT_ADC_TRIG5 0x5
+#define AB5500_INT_ADC_TRIG6 0x6
+#define AB5500_INT_ADC_TRIG7 0x7
+
+#define AB5500_GPADC_AUTO_TRIG_INDEX AB5500_GPADC_AUTO_TRIG0_MUX_CTRL
+#define GPADC_MANUAL_READY 0x01
+#define GPADC_MANUAL_ADOUT0_MASK 0x30
+#define GPADC_MANUAL_ADOUT1_MASK 0xC0
+#define GPADC_MANUAL_ADOUT0_ON 0x10
+#define GPADC_MANUAL_ADOUT1_ON 0x40
+#define MUX_SCALE_VBAT_MASK 0x02
+#define MUX_SCALE_45 0x02
+#define MUX_SCALE_BDATA_MASK 0x01
+#define MUX_SCALE_BDATA27 0x00
+#define MUX_SCALE_BDATA18 0x01
+#define MUX_SCALE_ACCDET2_MASK 0x01
+#define MUX_SCALE_ACCDET3_MASK 0x02
+#define ACCDET2_SCALE_VOL27 0x00
+#define ACCDET3_SCALE_VOL27 0x00
+#define TRIGX_FREQ_MASK 0x07
+#define AUTO_VBAT_MASK 0x10
+#define AUTO_VBAT_ON 0x10
+#define TRIG_VBAT_TXON_ARM_MASK 0x08
+#define TRIG_VBAT_NOTX_ARM_MASK 0x04
+#define TRIGX_ARM_MASK 0x20
+#define TRIGX_ARM 0x20
+#define TRIGX_MUX_SELECT 0x1F
+#define ADC_CAL_OFF_MASK 0x04
+#define ADC_ON_MODE_MASK 0x03
+#define ADC_CAL_ON 0x00
+#define ADC_FULLPWR 0x03
+#define ADC_XTAL_FORCE_MASK 0x80
+#define ADC_XTAL_FORCE_EN 0x80
+#define ADC_XTAL_FORCE_DI 0x00
+#define ADOUT0 0x01
+#define ADOUT1 0x02
+#define MIN_INDEX 0x02
+#define MAX_INDEX 0x03
+#define CTRL_INDEX 0x01
+
+/* GPADC constants from AB5500 spec */
+#define BTEMP_MIN 0
+#define BTEMP_MAX 1800
+#define BDATA_MIN 0
+#define BDATA_MAX 2750
+#define PCBTEMP_MIN 0
+#define PCBTEMP_MAX 1800
+#define XTALTEMP_MIN 0
+#define XTALTEMP_MAX 1800
+#define DIETEMP_MIN 0
+#define DIETEMP_MAX 1800
+#define VBUS_I_MIN 0
+#define VBUS_I_MAX 1600
+#define VBUS_V_MIN 0
+#define VBUS_V_MAX 20000
+#define ACCDET2_MIN 0
+#define ACCDET2_MAX 2500
+#define ACCDET3_MIN 0
+#define ACCDET3_MAX 2500
+#define VBAT_MIN 2300
+#define VBAT_MAX 4500
+#define BKBAT_MIN 0
+#define BKBAT_MAX 2750
+#define USBID_MIN 0
+#define USBID_MAX 1800
+#define KELVIN_MIN 0
+#define KELVIN_MAX 4500
+
+/* This is used for calibration */
+#define ADC_RESOLUTION 1023
+#define AUTO_ADC_RESOLUTION 255
+
+enum adc_auto_channels {
+ ADC_INPUT_TRIG0 = 0,
+ ADC_INPUT_TRIG1,
+ ADC_INPUT_TRIG2,
+ ADC_INPUT_TRIG3,
+ ADC_INPUT_TRIG4,
+ ADC_INPUT_TRIG5,
+ ADC_INPUT_TRIG6,
+ ADC_INPUT_TRIG7,
+ ADC_INPUT_VBAT_TXOFF,
+ ADC_INPUT_VBAT_TXON,
+ N_AUTO_TRIGGER
+};
+
+/**
+ * struct adc_auto_trigger - AB5500 GPADC auto trigger
+ * @auto_mux:	mux input
+ * @flag:	status of the trigger
+ * @freq:	frequency of conversion
+ * @adout:	adout to pull
+ * @trig_min:	trigger minimum value
+ * @trig_max:	trigger maximum value
+ * @auto_callb:	notification callback
+ */
+struct adc_auto_trigger {
+ u8 auto_mux;
+ u8 flag;
+ u8 freq;
+ u8 adout;
+ u8 trig_min;
+ u8 trig_max;
+ int (*auto_callb)(int mux);
+};
+
+/**
+ * struct ab5500_adc_interrupts - ab5500 gpadc interrupts
+ * @name:	name of the interrupt
+ * @isr:	function pointer to the isr
+ */
+struct ab5500_adc_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+/**
+ * struct ab5500_gpadc - AB5500 GPADC device information
+ * @chip_id:		ABB chip id
+ * @dev:		pointer to the struct device
+ * @node:		a list of AB5500 GPADCs, hence prepared for
+ *			reentrance
+ * @ab5500_gpadc_lock:	mutex serialising manual conversions
+ * @regu:		pointer to the struct regulator
+ * @irq:		interrupt number that is used by gpadc
+ * @prev_bdata:		previous battery-data reading, reused when a raw
+ *			value below 5 is discarded
+ * @gpadc_auto_lock:	spinlock protecting the auto-trigger flags
+ * @adc_trig:		auto trigger channels
+ * @gpadc_wq:		workqueue for the trigger work items
+ * @gpadc_trigX_work:	work items for the trigger channels
+ */
+struct ab5500_gpadc {
+ u8 chip_id;
+ struct device *dev;
+ struct list_head node;
+ struct mutex ab5500_gpadc_lock;
+ struct regulator *regu;
+ int irq;
+ int prev_bdata;
+ spinlock_t gpadc_auto_lock;
+ struct adc_auto_trigger adc_trig[N_AUTO_TRIGGER];
+ struct workqueue_struct *gpadc_wq;
+ struct work_struct gpadc_trig0_work;
+ struct work_struct gpadc_trig1_work;
+ struct work_struct gpadc_trig2_work;
+ struct work_struct gpadc_trig3_work;
+ struct work_struct gpadc_trig4_work;
+ struct work_struct gpadc_trig5_work;
+ struct work_struct gpadc_trig6_work;
+ struct work_struct gpadc_trig7_work;
+ struct work_struct gpadc_trig_vbat_txon_work;
+ struct work_struct gpadc_trig_vbat_txoff_work;
+};
+
+static LIST_HEAD(ab5500_gpadc_list);
+
+struct adc_data {
+ u8 mux;
+ int min;
+ int max;
+ int adout;
+};
+
+#define ADC_DATA(_id, _mux, _min, _max, _adout) \
+ [_id] = { \
+ .mux = _mux, \
+ .min = _min, \
+ .max = _max, \
+ .adout = _adout \
+ }
+
+static struct adc_data adc_tab[] = {
+ ADC_DATA(BTEMP_BALL, 0x0D, BTEMP_MIN, BTEMP_MAX, ADOUT0),
+ ADC_DATA(BAT_CTRL, 0x0D, BDATA_MIN, BDATA_MAX, 0),
+ ADC_DATA(MAIN_BAT_V, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TXON, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(VBUS_V, 0x10, VBUS_V_MIN, VBUS_V_MAX, 0),
+ ADC_DATA(USB_CHARGER_C, 0x0A, VBUS_I_MIN, VBUS_I_MAX, 0),
+ ADC_DATA(BK_BAT_V, 0x07, BKBAT_MIN, BKBAT_MAX, 0),
+ ADC_DATA(DIE_TEMP, 0x0F, DIETEMP_MIN, DIETEMP_MAX, ADOUT0),
+ ADC_DATA(PCB_TEMP, 0x13, PCBTEMP_MIN, PCBTEMP_MAX, ADOUT0),
+ ADC_DATA(XTAL_TEMP, 0x06, XTALTEMP_MIN, XTALTEMP_MAX, ADOUT0),
+ ADC_DATA(USB_ID, 0x1A, USBID_MIN, USBID_MAX, 0),
+ ADC_DATA(ACC_DETECT2, 0x18, ACCDET2_MIN, ACCDET2_MAX, 0),
+ ADC_DATA(ACC_DETECT3, 0x19, ACCDET3_MIN, ACCDET3_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TXON_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+};
+/**
+ * ab5500_gpadc_get() - return the AB5500 GPADC instance whose device
+ * name matches the given name, or an ERR_PTR() when it is not found
+ */
+struct ab5500_gpadc *ab5500_gpadc_get(const char *name)
+{
+ struct ab5500_gpadc *gpadc;
+ list_for_each_entry(gpadc, &ab5500_gpadc_list, node) {
+ if (!strcmp(name, dev_name(gpadc->dev)))
+ return gpadc;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(ab5500_gpadc_get);
+
+#define CONV(min, max, x)\
+ ((min) + ((((max)-(min))*(x))/ADC_RESOLUTION))
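+/*
+ * e.g. for MAIN_BAT_V (VBAT_MIN = 2300, VBAT_MAX = 4500) a raw code of
+ * 512 converts to 2300 + (2200 * 512) / 1023 = 3401
+ */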
+
+static int ab5500_gpadc_ad_to_voltage(struct ab5500_gpadc *gpadc,
+ u8 in, u16 ad_val)
+{
+ int res;
+
+ switch (in) {
+ case PCB_TEMP:
+ case BTEMP_BALL:
+ case MAIN_BAT_V:
+ case MAIN_BAT_V_TXON:
+ case ACC_DETECT2:
+ case ACC_DETECT3:
+ case VBUS_V:
+ case USB_CHARGER_C:
+ case BK_BAT_V:
+ case XTAL_TEMP:
+ case USB_ID:
+ case BAT_CTRL:
+ res = CONV(adc_tab[in].min, adc_tab[in].max, ad_val);
+ break;
+ case DIE_TEMP:
+ /*
+ * From the AB5500 product specification
+ * T(deg cel) = 27 - ((ADCode - 709)/2.4213)
+		 *             = 27 + 709/2.4213 - ADCode/2.4213
+		 *             which is approximately 320 - ADCode/2.4213
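+		 * e.g. ADCode = 800 gives 320 - 330 = -10 deg C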
+ */
+ res = 320 - (((unsigned long)ad_val * 1000000) / 242130) / 10;
+ break;
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel, not possible to convert\n");
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+/**
+ * ab5500_gpadc_convert() - manual gpadc conversion
+ * @gpadc: gpadc instance
+ * @input: analog input to be converted to digital data
+ *
+ * This function converts the selected analog input to digital data.
+ */
+int ab5500_gpadc_convert(struct ab5500_gpadc *gpadc, u8 input)
+{
+ int result, ret = -EINVAL;
+ u16 data = 0;
+ u8 looplimit = 0;
+ u8 status = 0;
+ u8 low_data, high_data, adout_mask, adout_val;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ mutex_lock(&gpadc->ab5500_gpadc_lock);
+
+ switch (input) {
+ case MAIN_BAT_V:
+ case MAIN_BAT_V_TXON:
+ /*
+		 * The value of the mux scale voltage depends
+		 * on the type of battery:
+		 * for Li-ion use MUX_SCALE_35 => 2.3-3.5V
+		 * for LiFePO4 use MUX_SCALE_45 => 2.3-4.5V
+		 * Check type of battery from platform data TODO ???
+ */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to read status\n");
+ goto out;
+ }
+ break;
+ case BTEMP_BALL:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set mux scale\n");
+ goto out;
+ }
+ break;
+ case BAT_CTRL:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set mux scale\n");
+ goto out;
+ }
+ break;
+ case XTAL_TEMP:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL,
+ ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_EN);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set xtaltemp\n");
+ goto out;
+ }
+ break;
+ case ACC_DETECT2:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2,
+ MUX_SCALE_ACCDET2_MASK, ACCDET2_SCALE_VOL27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set accdet2\n");
+ goto out;
+ }
+ break;
+ case ACC_DETECT3:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2,
+ MUX_SCALE_ACCDET3_MASK, ACCDET3_SCALE_VOL27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set accdet3\n");
+ goto out;
+ }
+ break;
+ case USB_CHARGER_C:
+ case VBUS_V:
+ case BK_BAT_V:
+ case USB_ID:
+ case PCB_TEMP:
+ case DIE_TEMP:
+ break;
+ default:
+ dev_err(gpadc->dev, "gpadc: Wrong adc\n");
+ goto out;
+ break;
+ }
+ if (adc_tab[input].adout) {
+ adout_mask = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK;
+ adout_val = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_ON : GPADC_MANUAL_ADOUT1_ON;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ adout_mask, adout_val);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set ADOUT\n");
+ goto out;
+ }
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANUAL_MUX_CTRL, adc_tab[input].mux);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to trigger manual conv\n");
+ goto out;
+ }
+ /* wait for completion of conversion */
+ looplimit = 0;
+ do {
+ msleep(1);
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_STAT_REG,
+ &status);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to read status\n");
+ goto out;
+ }
+ if (status & GPADC_MANUAL_READY)
+ break;
+ } while (++looplimit < 2);
+ if (looplimit >= 2) {
+ dev_err(gpadc->dev, "timeout:failed to complete conversion\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Disable ADOUT for measurement
+ */
+ if (adc_tab[input].adout) {
+ adout_mask = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ adout_mask, 0x0);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to disable ADOUT\n");
+ goto out;
+ }
+ }
+ /*
+ * Disable XTAL TEMP
+ */
+ if (input == XTAL_TEMP) {
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL,
+ ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_DI);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to disable xtaltemp\n");
+ goto out;
+ }
+ }
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANDATAL_REG, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANDATAH_REG, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: read high data failed\n");
+ goto out;
+ }
+
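+	/* assemble the 10-bit result: 8 MSBs from MANDATAH, 2 LSBs from bits 7:6 of MANDATAL */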
+ data = (high_data << 2) | (low_data >> 6);
+ if (input == BAT_CTRL || input == BTEMP_BALL) {
+ /*
+ * TODO: Re-check with h/w team
+ * discard null or value < 5, as there is some error
+ * in conversion
+ */
+ if (data < 5)
+ data = gpadc->prev_bdata;
+ else
+ gpadc->prev_bdata = data;
+ }
+ result = ab5500_gpadc_ad_to_voltage(gpadc, input, data);
+
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ return result;
+
+out:
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ dev_err(gpadc->dev,
+ "gpadc: Failed to AD convert channel %d\n", input);
+ return ret;
+}
+EXPORT_SYMBOL(ab5500_gpadc_convert);
+
+/**
+ * ab5500_gpadc_program_auto() - program a gpadc auto trigger channel
+ * @gpadc: gpadc instance
+ * @trig: generic trigger channel for conversion
+ *
+ * This function programs the auto trigger channel.
+ */
+static int ab5500_gpadc_program_auto(struct ab5500_gpadc *gpadc, int trig)
+{
+ int ret;
+ u8 adout;
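+	/*
+	 * Each auto trigger owns a block of four registers starting at
+	 * AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2): mux select and the
+	 * arm bit at offset 0, frequency/adout control at CTRL_INDEX and
+	 * the min/max trigger levels at MIN_INDEX and MAX_INDEX.
+	 */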
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MIN_INDEX,
+ gpadc->adc_trig[trig].trig_min);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program min\n");
+ return ret;
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MAX_INDEX,
+ gpadc->adc_trig[trig].trig_max);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program max\n");
+ return ret;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2),
+ TRIGX_MUX_SELECT, gpadc->adc_trig[trig].auto_mux);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to select mux\n");
+ return ret;
+ }
+ if (gpadc->adc_trig[trig].adout) {
+ adout = gpadc->adc_trig[trig].adout == ADOUT0 ?
+ gpadc->adc_trig[trig].adout << 6 :
+ gpadc->adc_trig[trig].adout << 5;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX,
+ adout, adout);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program adout\n");
+ return ret;
+ }
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX,
+ TRIGX_FREQ_MASK, gpadc->adc_trig[trig].freq);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program freq\n");
+ return ret;
+ }
+ return ret;
+
+}
+
+#define TRIG_V(trigval, min, max) \
+ ((((trigval) - (min)) * AUTO_ADC_RESOLUTION) / ((max) - (min)))
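+/* e.g. a MAIN_BAT_V threshold of 3600 becomes ((3600 - 2300) * 255) / 2200 = 150 */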
+
+static int ab5500_gpadc_vbat_auto_conf(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *in)
+{
+ int trig_min, ret;
+ u8 trig_reg, trig_arm;
+
+ /* Scale mux voltage */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to set vbat scale\n");
+ return ret;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_CTRL1,
+ AUTO_VBAT_MASK, AUTO_VBAT_ON);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to set vbat on\n");
+ return ret;
+ }
+
+ trig_min = TRIG_V(in->min, adc_tab[in->mux].min, adc_tab[in->mux].max);
+
+ if (in->mux == MAIN_BAT_V_TRIG_MIN) {
+ trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX;
+ trig_arm = TRIG_VBAT_NOTX_ARM_MASK;
+ } else {
+ trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON;
+ trig_arm = TRIG_VBAT_TXON_ARM_MASK;
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ trig_reg, trig_min);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program vbat min\n");
+ return ret;
+ }
+ /*
+ * arm the trigger
+ */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL1, trig_arm, trig_arm);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to trig vbat\n");
+ return ret;
+ }
+ return ret;
+}
+/**
+ * ab5500_gpadc_convert_auto() - gpadc conversion in auto mode
+ * @gpadc: gpadc instance
+ * @in: auto trigger input configuration
+ *
+ * This function converts the selected channel from
+ * analog to digital data in auto mode.
+ */
+
+int ab5500_gpadc_convert_auto(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *in)
+{
+ int ret, trig;
+ unsigned long flags;
+
+ if (!gpadc)
+ return -ENODEV;
+ mutex_lock(&gpadc->ab5500_gpadc_lock);
+
+ if (in->mux == MAIN_BAT_V_TXON_TRIG_MIN) {
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ if (gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag == true) {
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: Auto vbat txon busy");
+ goto out;
+ }
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+
+ ret = ab5500_gpadc_vbat_auto_conf(gpadc, in);
+ if (ret < 0)
+ goto out;
+
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_mux = in->mux;
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_callb =
+ in->auto_adc_callback;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ } else if (in->mux == MAIN_BAT_V_TRIG_MIN) {
+
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ if (gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag == true) {
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: Auto vbat busy");
+ goto out;
+ }
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+
+ ret = ab5500_gpadc_vbat_auto_conf(gpadc, in);
+ if (ret < 0)
+ goto out;
+
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_mux = in->mux;
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_callb =
+ in->auto_adc_callback;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ } else {
+ /*
+ * check if free trigger is available
+ */
+ trig = ADC_INPUT_TRIG0;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+		while (trig <= ADC_INPUT_TRIG7 &&
+				gpadc->adc_trig[trig].flag == true)
+			trig++;
+
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ if (trig > ADC_INPUT_TRIG7) {
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: no free channel\n");
+ goto out;
+ }
+ switch (in->mux) {
+ case BTEMP_BALL:
+ case MAIN_BAT_V:
+ /*
+			 * The value of the mux scale voltage depends
+			 * on the type of battery:
+			 * for Li-ion use MUX_SCALE_35 => 2.3-3.5V
+			 * for LiFePO4 use MUX_SCALE_45 => 2.3-4.5V
+			 * Check type of battery from platform data TODO ???
+ */
+ ret = abx500_mask_and_set_register_interruptible(
+ gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: failed to read status\n");
+ goto out;
+ }
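+			/* fall through - the generic min/max/adout setup below also applies */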
+ case ACC_DETECT2:
+ case ACC_DETECT3:
+ case VBUS_V:
+ case USB_CHARGER_C:
+ case BK_BAT_V:
+ case PCB_TEMP:
+ case USB_ID:
+ case BAT_CTRL:
+ gpadc->adc_trig[trig].trig_min =
+ (u8)TRIG_V(in->min, adc_tab[in->mux].min,
+ adc_tab[in->mux].max);
+ gpadc->adc_trig[trig].trig_max =
+ (u8)TRIG_V(in->max, adc_tab[in->mux].min,
+ adc_tab[in->mux].max);
+ gpadc->adc_trig[trig].adout =
+ adc_tab[in->mux].adout;
+ break;
+ case DIE_TEMP:
+ /*
+ * From the AB5500 product specification
+			 * T(deg cel) = 27 - ((ADCode - 709)/2.4213);
+			 * the adc min and max trigger values are derived from this formula.
+ */
+ gpadc->adc_trig[trig].trig_min =
+ 709 - (22413 * (in->min - 27))/10000;
+ gpadc->adc_trig[trig].trig_max =
+ 709 - (22413 * (in->max - 27))/10000;
+ gpadc->adc_trig[trig].adout =
+ adc_tab[in->mux].adout;
+ break;
+ default:
+ dev_err(gpadc->dev, "Unknow GPADC request\n");
+ break;
+ }
+ gpadc->adc_trig[trig].freq = in->freq;
+ gpadc->adc_trig[trig].auto_mux =
+ adc_tab[in->mux].mux;
+ gpadc->adc_trig[trig].auto_callb = in->auto_adc_callback;
+
+ ret = ab5500_gpadc_program_auto(gpadc, trig);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to program auto ch\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig * 4),
+ TRIGX_ARM_MASK, TRIGX_ARM);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to trigger\n");
+ goto out;
+ }
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[trig].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ }
+out:
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ return ret;
+
+}
+EXPORT_SYMBOL(ab5500_gpadc_convert_auto);
+
+static void ab5500_gpadc_trigx_work(struct ab5500_gpadc *gp, int trig)
+{
+ unsigned long flags;
+ if (gp->adc_trig[trig].auto_callb != NULL) {
+ gp->adc_trig[trig].auto_callb(gp->adc_trig[trig].auto_mux);
+ spin_lock_irqsave(&gp->gpadc_auto_lock, flags);
+ gp->adc_trig[trig].flag = false;
+ spin_unlock_irqrestore(&gp->gpadc_auto_lock, flags);
+ } else {
+ dev_err(gp->dev, "Unknown trig for %d\n", trig);
+ }
+}
+/**
+ * ab5500_gpadc_trig0_work() - work item for trig0 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 0 auto conversion.
+ */
+static void ab5500_gpadc_trig0_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig0_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG0);
+}
+
+/**
+ * ab5500_gpadc_trig1_work() - work item for trig1 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig1 auto conversion.
+ */
+static void ab5500_gpadc_trig1_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig1_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG1);
+}
+
+/**
+ * ab5500_gpadc_trig2_work() - work item for trig2 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 2 auto conversion.
+ */
+static void ab5500_gpadc_trig2_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig2_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG2);
+}
+
+/**
+ * ab5500_gpadc_trig3_work() - work item for trig3 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 3 auto conversion.
+ */
+static void ab5500_gpadc_trig3_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig3_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG3);
+}
+
+/**
+ * ab5500_gpadc_trig4_work() - work item for trig4 auto adc
+ * @work: pointer to the work_struct
+ *
+ * This is a work handler for trig 4 auto conversion.
+ */
+static void ab5500_gpadc_trig4_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig4_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG4);
+}
+
+/**
+ * ab5500_gpadc_trig5_work() - work item for trig5 auto adc
+ * @work: pointer to the work_struct
+ *
+ * This is a work handler for trig 5 auto conversion.
+ */
+static void ab5500_gpadc_trig5_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig5_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG5);
+}
+
+/**
+ * ab5500_gpadc_trig6_work() - work item for trig6 auto adc
+ * @work: pointer to the work_struct
+ *
+ * This is a work handler for trig 6 auto conversion.
+ */
+static void ab5500_gpadc_trig6_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig6_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG6);
+}
+
+/**
+ * ab5500_gpadc_trig7_work() - work item for trig7 auto adc
+ * @work: pointer to the work_struct
+ *
+ * This is a work handler for trig 7 auto conversion.
+ */
+static void ab5500_gpadc_trig7_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig7_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG7);
+}
+
+/**
+ * ab5500_gpadc_vbat_txon_work() - work item for vbat_txon trigger auto adc
+ * @work: pointer to the work_struct
+ *
+ * This is a work handler for vbat_txon trigger auto adc.
+ */
+static void ab5500_gpadc_vbat_txon_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig_vbat_txon_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXON);
+}
+
+/**
+ * ab5500_gpadc_vbat_txoff_work() - work item for vbat_txoff trigger auto adc
+ * @work: pointer to the work_struct
+ *
+ * This is a work handler for vbat_txoff trigger auto adc.
+ */
+static void ab5500_gpadc_vbat_txoff_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig_vbat_txoff_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXOFF);
+}
+
+/**
+ * ab5500_adc_trigx_handler() - isr for auto gpadc conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the gpadc data passed when requesting the irq
+ *
+ * This is an interrupt service routine for auto gpadc conversion.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab5500_adc_trigx_handler(int irq, void *_gpadc)
+{
+ struct ab5500_platform_data *plat;
+ struct ab5500_gpadc *gpadc = _gpadc;
+ int dev_irq;
+
+ plat = dev_get_platdata(gpadc->dev->parent);
+ dev_irq = irq - plat->irq.base;
+
+ switch (dev_irq) {
+ case AB5500_INT_ADC_TRIG0:
+ dev_dbg(gpadc->dev, "Trigger 0 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig0_work);
+ break;
+ case AB5500_INT_ADC_TRIG1:
+ dev_dbg(gpadc->dev, "Trigger 1 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig1_work);
+ break;
+ case AB5500_INT_ADC_TRIG2:
+ dev_dbg(gpadc->dev, "Trigger 2 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig2_work);
+ break;
+ case AB5500_INT_ADC_TRIG3:
+ dev_dbg(gpadc->dev, "Trigger 3 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig3_work);
+ break;
+ case AB5500_INT_ADC_TRIG4:
+ dev_dbg(gpadc->dev, "Trigger 4 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig4_work);
+ break;
+ case AB5500_INT_ADC_TRIG5:
+ dev_dbg(gpadc->dev, "Trigger 5 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig5_work);
+ break;
+ case AB5500_INT_ADC_TRIG6:
+ dev_dbg(gpadc->dev, "Trigger 6 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig6_work);
+ break;
+ case AB5500_INT_ADC_TRIG7:
+ dev_dbg(gpadc->dev, "Trigger 7 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig7_work);
+ break;
+ default:
+ dev_dbg(gpadc->dev, "unknown trigx handler input\n");
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_adc_vbat_txon_handler() - isr for auto vbat_txon conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the gpadc data passed when requesting the irq
+ *
+ * This is an interrupt service routine for the auto vbat_txon conversion.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab5500_adc_vbat_txon_handler(int irq, void *_gpadc)
+{
+ struct ab5500_gpadc *gpadc = _gpadc;
+
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txon_work);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_adc_vbat_txoff_handler() - isr for auto vbat_txoff conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the gpadc data passed when requesting the irq
+ *
+ * This is an interrupt service routine for the auto vbat_txoff conversion.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab5500_adc_vbat_txoff_handler(int irq, void *_gpadc)
+{
+ struct ab5500_gpadc *gpadc = _gpadc;
+
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txoff_work);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_gpadc_configuration() - configure the gpadc block
+ * @gpadc: pointer to the ab5500_gpadc data
+ *
+ * Enables ADC calibration and full-power mode.
+ */
+static int ab5500_gpadc_configuration(struct ab5500_gpadc *gpadc)
+{
+ int ret;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL2,
+ ADC_CAL_OFF_MASK | ADC_ON_MODE_MASK,
+ ADC_CAL_ON | ADC_FULLPWR);
+ return ret;
+}
+
+/* ab5500 gpadc driver interrupts and their respective ISRs */
+static struct ab5500_adc_interrupts ab5500_adc_irq[] = {
+ {"TRIGGER-0", ab5500_adc_trigx_handler},
+ {"TRIGGER-1", ab5500_adc_trigx_handler},
+ {"TRIGGER-2", ab5500_adc_trigx_handler},
+ {"TRIGGER-3", ab5500_adc_trigx_handler},
+ {"TRIGGER-4", ab5500_adc_trigx_handler},
+ {"TRIGGER-5", ab5500_adc_trigx_handler},
+ {"TRIGGER-6", ab5500_adc_trigx_handler},
+ {"TRIGGER-7", ab5500_adc_trigx_handler},
+ {"TRIGGER-VBAT-TXON", ab5500_adc_vbat_txon_handler},
+ {"TRIGGER-VBAT", ab5500_adc_vbat_txoff_handler},
+};
+
+static int __devinit ab5500_gpadc_probe(struct platform_device *pdev)
+{
+ int ret, irq, i, j;
+ struct ab5500_gpadc *gpadc;
+
+ gpadc = kzalloc(sizeof(struct ab5500_gpadc), GFP_KERNEL);
+ if (!gpadc) {
+ dev_err(&pdev->dev, "Error: No memory\n");
+ return -ENOMEM;
+ }
+ gpadc->dev = &pdev->dev;
+ mutex_init(&gpadc->ab5500_gpadc_lock);
+ spin_lock_init(&gpadc->gpadc_auto_lock);
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_adc_irq[i].isr,
+ IRQF_NO_SUSPEND,
+ ab5500_adc_irq[i].name, gpadc);
+
+ if (ret) {
+ dev_err(gpadc->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_adc_irq[i].name, irq, ret);
+ goto fail_irq;
+ }
+ dev_dbg(gpadc->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_adc_irq[i].name, irq, ret);
+ }
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(gpadc->dev);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "failed to get chip ID\n");
+ goto fail_irq;
+ }
+ gpadc->chip_id = (u8) ret;
+
+ /* Create a work queue for gpadc auto */
+ gpadc->gpadc_wq =
+ create_singlethread_workqueue("ab5500_gpadc_wq");
+ if (gpadc->gpadc_wq == NULL) {
+ dev_err(gpadc->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto fail_irq;
+ }
+
+ INIT_WORK(&gpadc->gpadc_trig0_work, ab5500_gpadc_trig0_work);
+ INIT_WORK(&gpadc->gpadc_trig1_work, ab5500_gpadc_trig1_work);
+ INIT_WORK(&gpadc->gpadc_trig2_work, ab5500_gpadc_trig2_work);
+ INIT_WORK(&gpadc->gpadc_trig3_work, ab5500_gpadc_trig3_work);
+ INIT_WORK(&gpadc->gpadc_trig4_work, ab5500_gpadc_trig4_work);
+ INIT_WORK(&gpadc->gpadc_trig5_work, ab5500_gpadc_trig5_work);
+ INIT_WORK(&gpadc->gpadc_trig6_work, ab5500_gpadc_trig6_work);
+ INIT_WORK(&gpadc->gpadc_trig7_work, ab5500_gpadc_trig7_work);
+ INIT_WORK(&gpadc->gpadc_trig_vbat_txon_work,
+ ab5500_gpadc_vbat_txon_work);
+ INIT_WORK(&gpadc->gpadc_trig_vbat_txoff_work,
+ ab5500_gpadc_vbat_txoff_work);
+
+ for (j = 0; j < N_AUTO_TRIGGER; j++)
+ gpadc->adc_trig[j].flag = false;
+
+ ret = ab5500_gpadc_configuration(gpadc);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: configuration failed\n");
+ goto free_wq;
+ }
+ list_add_tail(&gpadc->node, &ab5500_gpadc_list);
+ platform_set_drvdata(pdev, gpadc);
+
+ return 0;
+free_wq:
+ destroy_workqueue(gpadc->gpadc_wq);
+fail_irq:
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ free_irq(irq, gpadc);
+ }
+ kfree(gpadc);
+ gpadc = NULL;
+ return ret;
+}
+
+static int __devexit ab5500_gpadc_remove(struct platform_device *pdev)
+{
+ int i, irq;
+ struct ab5500_gpadc *gpadc = platform_get_drvdata(pdev);
+
+ /* remove this gpadc entry from the list */
+ list_del(&gpadc->node);
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ free_irq(irq, gpadc);
+ }
+ /* Flush work */
+ flush_workqueue(gpadc->gpadc_wq);
+
+ /* Delete the work queue */
+ destroy_workqueue(gpadc->gpadc_wq);
+
+ kfree(gpadc);
+ gpadc = NULL;
+ return 0;
+}
+
+static struct platform_driver ab5500_gpadc_driver = {
+ .probe = ab5500_gpadc_probe,
+ .remove = __devexit_p(ab5500_gpadc_remove),
+ .driver = {
+ .name = "ab5500-adc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_gpadc_init(void)
+{
+ return platform_driver_register(&ab5500_gpadc_driver);
+}
+
+static void __exit ab5500_gpadc_exit(void)
+{
+ platform_driver_unregister(&ab5500_gpadc_driver);
+}
+
+subsys_initcall_sync(ab5500_gpadc_init);
+module_exit(ab5500_gpadc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vijaya Kumar K");
+MODULE_ALIAS("platform:ab5500_adc");
+MODULE_DESCRIPTION("AB5500 GPADC driver");
diff --git a/drivers/mfd/ab5500-power.c b/drivers/mfd/ab5500-power.c
new file mode 100644
index 00000000000..9474c32809b
--- /dev/null
+++ b/drivers/mfd/ab5500-power.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+static struct device *dev;
+
+/* STARTUP */
+#define AB5500_SYSPOR_CONTROL 0x30
+
+/* VINT IO I2C CLOCK */
+#define AB5500_RTC_VINT 0x01
+
+int ab5500_clock_rtc_enable(int num, bool enable)
+{
+ /* RTC_CLK{0,1,2} are bits {4,3,2}, active low */
+ u8 mask = BIT(4 - num);
+ u8 value = enable ? 0 : mask;
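+ /* e.g. num == 1 selects bit 3 (RTC_CLK1); enabling clears the bit */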
+
+ /* Don't allow RTC_CLK0 to be controlled. */
+ if (num < 1 || num > 2)
+ return -EINVAL;
+
+ if (!dev)
+ return -EAGAIN;
+
+ return abx500_mask_and_set(dev, AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ AB5500_RTC_VINT, mask, value);
+}
+
+static void ab5500_power_off(void)
+{
+ sigset_t old;
+ sigset_t all;
+
+ sigfillset(&all);
+
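+ /*
+ * Presumably blocked so a pending signal cannot abort the interruptible
+ * register write below while powering off.
+ */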
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ /* Clear dbb_on */
+ int ret = abx500_set(dev, AB5500_BANK_STARTUP,
+ AB5500_SYSPOR_CONTROL, 0);
+ WARN_ON(ret);
+ }
+}
+
+static int __devinit ab5500_power_probe(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent);
+
+ dev = &pdev->dev;
+
+ if (plat->pm_power_off)
+ pm_power_off = ab5500_power_off;
+
+ return 0;
+}
+
+static int __devexit ab5500_power_remove(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent);
+
+ if (plat->pm_power_off)
+ pm_power_off = NULL;
+ dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver ab5500_power_driver = {
+ .driver = {
+ .name = "ab5500-power",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_power_probe,
+ .remove = __devexit_p(ab5500_power_remove),
+};
+
+static int __init ab5500_power_init(void)
+{
+ return platform_driver_register(&ab5500_power_driver);
+}
+
+subsys_initcall(ab5500_power_init);
+
+MODULE_DESCRIPTION("AB5500 power driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d3d572b2317..e2e633f0885 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -684,7 +684,7 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
static struct resource __devinitdata ab8500_temp_resources[] = {
{
- .name = "AB8500_TEMP_WARM",
+ .name = "ABX500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
.end = AB8500_INT_TEMP_WARM,
.flags = IORESOURCE_IRQ,
@@ -706,6 +706,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.name = "ab8500-regulator",
},
{
+ .name = "ab8500-regulator-debug",
+ },
+ {
.name = "ab8500-gpio",
.num_resources = ARRAY_SIZE(ab8500_gpio_resources),
.resources = ab8500_gpio_resources,
@@ -775,7 +778,7 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.name = "ab8500-denc",
},
{
- .name = "ab8500-temp",
+ .name = "abx500-temp",
.num_resources = ARRAY_SIZE(ab8500_temp_resources),
.resources = ab8500_temp_resources,
},
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index dedb7f65cea..37491ef2f1a 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -4,6 +4,72 @@
* Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
* License Terms: GNU General Public License v2
*/
+/*
+ * AB8500 register access
+ * ======================
+ *
+ * read:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # cat <debugfs>/ab8500/register-value
+ *
+ * write:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # echo VALUE > <debugfs>/ab8500/register-value
+ *
+ * read all registers from a bank:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # cat <debugfs>/ab8500/all-bank-register
+ *
+ * BANK target AB8500 register bank
+ * ADDR target AB8500 register address
+ * VALUE decimal or 0x-prefixed hexadecimal
+ *
+ *
+ * User Space notification on AB8500 IRQ
+ * =====================================
+ *
+ * Allows a user space entity to be notified when the target AB8500 IRQ occurs.
+ * When subscribed, a sysfs entry is created in the ab8500.i2c platform device.
+ * One can poll this file to get the target IRQ occurrence information.
+ *
+ * subscribe to an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-subscribe
+ *
+ * unsubscribe from an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-unsubscribe
+ *
+ *
+ * AB8500 register formatted read/write access
+ * ==========================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] BANK ADRESS [VALUE]" > $debugfs/ab8500/hwreg
+ *
+ * CMD read read access
+ * write write access
+ *
+ * BANK target reg bank
+ * ADDRESS target reg address
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hex (read) output in 0x-prefixed hexadecimal (default)
+ * -m|-mask MASK 0x-prefixed hexadecimal mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read: right shift, write: left shift)
+ *
+ * Warning: the bit shift operation is also applied to the bit-mask.
+ * Warning: the bit shift direction depends on whether the command is a read
+ * or a write.
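+ *
+ * Example (placeholder bank/address values): set bit 3 of register 0x40 in
+ * bank 0x02, then read it back:
+ * # echo "write -m 0x1 -s 3 0x02 0x40 1" > $debugfs/ab8500/hwreg
+ * # echo "read -m 0x1 -s 3 0x02 0x40" > $debugfs/ab8500/hwreg
+ * # cat $debugfs/ab8500/hwreg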
+ */
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -11,13 +77,29 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/ab8500.h>
+#include <linux/mfd/ab8500/gpadc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/string.h>
+#include <linux/ctype.h>
+#endif
static u32 debug_bank;
static u32 debug_address;
+static int irq_first;
+static int irq_last;
+static u32 irq_count[AB8500_NR_IRQS];
+
+static struct device_attribute *dev_attr[AB8500_NR_IRQS];
+static char *event_name[AB8500_NR_IRQS];
+
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -42,15 +124,35 @@ struct ab8500_i2c_ranges {
const struct ab8500_reg_range *range;
};
+/* hwreg- "mask" and "shift" entries ressources */
+struct hwreg_cfg {
+ u32 bank; /* target bank */
+ u32 addr; /* target address */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+ int shift; /* bit shift (read: right shift, write: left shift) */
+};
+/* fmt bit #0: 0=hexa, 1=dec */
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1)
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c))
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_NUM_BANKS 22
+#define AB8500_ADC_NAME_STRING "gpadc"
+#define AB8500_NUM_BANKS 24
#define AB8500_REV_REG 0x80
static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
- .range = 0,
+ .range = NULL,
},
[AB8500_SYS_CTRL1_BLOCK] = {
.num_ranges = 3,
@@ -215,7 +317,7 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
[AB8500_CHARGER] = {
- .num_ranges = 8,
+ .num_ranges = 9,
.range = (struct ab8500_reg_range[]) {
{
.first = 0x00,
@@ -249,6 +351,10 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
.first = 0xC0,
.last = 0xC2,
},
+ {
+ .first = 0xf5,
+ .last = 0xf6,
+ },
},
},
[AB8500_GAS_GAUGE] = {
@@ -268,6 +374,24 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
+ [AB8500_DEVELOPMENT] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ },
+ },
+ [AB8500_DEBUG] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x05,
+ .last = 0x07,
+ },
+ },
+ },
[AB8500_AUDIO] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -354,6 +478,24 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
};
+static irqreturn_t ab8500_debug_handler(int irq, void *data)
+{
+ char buf[16];
+ struct kobject *kobj = (struct kobject *)data;
+ unsigned int irq_abb = irq - irq_first;
+
+ if (irq_abb < AB8500_NR_IRQS)
+ irq_count[irq_abb]++;
+ /*
+ * This makes it possible to use poll for events (POLLPRI | POLLERR)
+ * from userspace on the sysfs file named <irq-nr>
+ */
+ sprintf(buf, "%d", irq);
+ sysfs_notify(kobj, NULL, buf);
+
+ return IRQ_HANDLED;
+}
+
static int ab8500_registers_print(struct seq_file *s, void *p)
{
struct device *dev = s->private;
@@ -515,10 +657,732 @@ static ssize_t ab8500_val_write(struct file *file,
printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
return -EINVAL;
}
+ return count;
+}
+
+/*
+ * - HWREG DB8500 formated routines
+ */
+static int ab8500_hwreg_print(struct seq_file *s, void *d)
+{
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)hwreg_cfg.bank, (u8)hwreg_cfg.addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (hwreg_cfg.shift >= 0)
+ regvalue >>= hwreg_cfg.shift;
+ else
+ regvalue <<= -hwreg_cfg.shift;
+ regvalue &= hwreg_cfg.mask;
+
+ if (REG_FMT_DEC(&hwreg_cfg))
+ seq_printf(s, "%d\n", regvalue);
+ else
+ seq_printf(s, "0x%02X\n", regvalue);
+ return 0;
+}
+
+static int ab8500_hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_hwreg_print, inode->i_private);
+}
+
+static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+{
+ int bat_ctrl_raw;
+ int bat_ctrl_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL);
+ bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BAT_CTRL, bat_ctrl_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bat_ctrl_convert, bat_ctrl_raw);
+}
+
+static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
+ .open = ab8500_gpadc_bat_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+{
+ int btemp_ball_raw;
+ int btemp_ball_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL);
+ btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
+ btemp_ball_raw);
+
+ return seq_printf(s,
+ "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+}
+
+static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
+ .open = ab8500_gpadc_btemp_ball_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+{
+ int main_charger_v_raw;
+ int main_charger_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V);
+ main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_V, main_charger_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_v_convert, main_charger_v_raw);
+}
+
+static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_v_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
+ .open = ab8500_gpadc_main_charger_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+{
+ int acc_detect1_raw;
+ int acc_detect1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1);
+ acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
+ acc_detect1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect1_convert, acc_detect1_raw);
+}
+
+static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect1_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
+ .open = ab8500_gpadc_acc_detect1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+{
+ int acc_detect2_raw;
+ int acc_detect2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2);
+ acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ ACC_DETECT2, acc_detect2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect2_convert, acc_detect2_raw);
+}
+
+static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect2_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
+ .open = ab8500_gpadc_acc_detect2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+{
+ int aux1_raw;
+ int aux1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1);
+ aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
+ aux1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux1_convert, aux1_raw);
+}
+
+static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux1_fops = {
+ .open = ab8500_gpadc_aux1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+{
+ int aux2_raw;
+ int aux2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2);
+ aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
+ aux2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux2_convert, aux2_raw);
+}
+
+static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux2_fops = {
+ .open = ab8500_gpadc_aux2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+{
+ int main_bat_v_raw;
+ int main_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V);
+ main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
+ main_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_bat_v_convert, main_bat_v_raw);
+}
+
+static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
+ .open = ab8500_gpadc_main_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+{
+ int vbus_v_raw;
+ int vbus_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V);
+ vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
+ vbus_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ vbus_v_convert, vbus_v_raw);
+}
+
+static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_vbus_v_fops = {
+ .open = ab8500_gpadc_vbus_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+{
+ int main_charger_c_raw;
+ int main_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C);
+ main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_C, main_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_c_convert, main_charger_c_raw);
+}
+
+static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
+ .open = ab8500_gpadc_main_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+{
+ int usb_charger_c_raw;
+ int usb_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C);
+ usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ USB_CHARGER_C, usb_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ usb_charger_c_convert, usb_charger_c_raw);
+}
+
+static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_usb_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
+ .open = ab8500_gpadc_usb_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+{
+ int bk_bat_v_raw;
+ int bk_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V);
+ bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BK_BAT_V, bk_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bk_bat_v_convert, bk_bat_v_raw);
+}
+
+static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
+ .open = ab8500_gpadc_bk_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+{
+ int die_temp_raw;
+ int die_temp_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP);
+ die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
+ die_temp_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ die_temp_convert, die_temp_raw);
+}
+
+static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_die_temp_fops = {
+ .open = ab8500_gpadc_die_temp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Return the length of an ASCII numerical value, or 0 if the string is not
+ * a numerical value.
+ * The string must start at the first character of the value.
+ * The value may only be terminated by '\0', a space or a newline.
+ * The value can be decimal or hexadecimal (prefixed with 0x or 0X).
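+ * e.g. "0x1F\n" and "-42 " are valid; "1F" is not (no 0x prefix and 'F' is
+ * not a decimal digit).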
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
+ s += 2;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s == '-')
+ s++;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
+
+/*
+ * Parse hwreg input data.
+ * The target cfg (the global hwreg_cfg for the debugfs node) is updated only
+ * if the input data syntax is valid.
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
+ struct device *dev)
+{
+ uint write, val = 0;
+ struct hwreg_cfg loc = {
+ .bank = 0, /* default: invalid phys addr */
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if (!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else
+ return -EINVAL;
+
+ /* OPTIONS: -d|-dec -h|-hex -m|-mask -s|-shift */
+ while ((*b == ' ') || (*b == '-')) {
+ if (*(b-1) != ' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) ||
+ (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) ||
+ (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) ||
+ (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2) == ' ') ? 3 : 6;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) ||
+ (!strncmp(b, "-shift ", 7))) {
+ b += (*(b+2) == ' ') ? 3 : 7;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg BANK and ADDRESS */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.bank = simple_strtoul(b, &b, 0);
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+
+ if (write) {
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef ABB_HWREG_DEBUG
+ pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d"
+ "value=0x%X\n", (write) ? "write" : "read",
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (write) {
+ u8 regvalue;
+ int ret = abx500_get_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
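+ /*
+ * Read-modify-write: clear the (shifted) masked field in the current
+ * register value, then merge in the shifted, masked VALUE.
+ */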
+ if (cfg->shift >= 0) {
+ regvalue &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ regvalue &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | regvalue;
+
+ ret = abx500_set_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, (u8)val);
+ if (ret < 0) {
+ pr_err("abx500_set_reg failed %d, %d", ret, __LINE__);
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
+static ssize_t ab8500_hwreg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg, dev);
+ return (ret) ? ret : buf_size;
+}
+
+/*
+ * - irq subscribe/unsubscribe stuff
+ */
+static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", irq_first);
+
+ return 0;
+}
+
+static int ab8500_subscribe_unsubscribe_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_subscribe_unsubscribe_print,
+ inode->i_private);
+}
+
+/*
+ * Userspace should use poll() on this file. When an event occurs,
+ * the blocking poll will be released.
+ */
+static ssize_t show_irq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long name;
+ unsigned int irq_index;
+ int err;
+
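+ /* The sysfs attribute is named after the AB8500 irq number; parse it back */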
+ err = strict_strtoul(attr->attr.name, 0, &name);
+ if (err)
+ return err;
+
+ irq_index = name - irq_first;
+ if (irq_index >= AB8500_NR_IRQS)
+ return -EINVAL;
+ else
+ return sprintf(buf, "%u\n", irq_count[irq_index]);
+}
+
+static ssize_t ab8500_subscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= AB8500_NR_IRQS)
+ return -EINVAL;
+
+ /*
+ * This will create a sysfs file named <irq-nr> which userspace can
+ * use to select or poll and get the AB8500 events
+ */
+ dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
+ GFP_KERNEL);
+ event_name[irq_index] = kmalloc(buf_size, GFP_KERNEL);
+ sprintf(event_name[irq_index], "%lu", user_val);
+ dev_attr[irq_index]->show = show_irq;
+ dev_attr[irq_index]->store = NULL;
+ dev_attr[irq_index]->attr.name = event_name[irq_index];
+ dev_attr[irq_index]->attr.mode = S_IRUGO;
+ err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed %d\n", err);
+ return err;
+ }
+
+ err = request_threaded_irq(user_val, NULL, ab8500_debug_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND, "ab8500-debug", &dev->kobj);
+ if (err < 0) {
+ printk(KERN_ERR "request_threaded_irq failed %d, %lu\n",
+ err, user_val);
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ return err;
+ }
+
+ return buf_size;
+}
+
+static ssize_t ab8500_unsubscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= AB8500_NR_IRQS)
+ return -EINVAL;
+
+ /* Set irq count to 0 when unsubscribe */
+ irq_count[irq_index] = 0;
+
+ if (dev_attr[irq_index])
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+
+
+ free_irq(user_val, &dev->kobj);
+ kfree(event_name[irq_index]);
+ kfree(dev_attr[irq_index]);
return count;
}
+/*
+ * - several debugfs node fops
+ */
+
static const struct file_operations ab8500_bank_fops = {
.open = ab8500_bank_open,
.write = ab8500_bank_write,
@@ -546,65 +1410,177 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations ab8500_subscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_subscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_unsubscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_unsubscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_hwreg_fops = {
+ .open = ab8500_hwreg_open,
+ .write = ab8500_hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static struct dentry *ab8500_dir;
-static struct dentry *ab8500_reg_file;
-static struct dentry *ab8500_bank_file;
-static struct dentry *ab8500_address_file;
-static struct dentry *ab8500_val_file;
+static struct dentry *ab8500_gpadc_dir;
static int __devinit ab8500_debug_probe(struct platform_device *plf)
{
+ struct dentry *file;
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
+ irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
+ if (irq_first < 0) {
+ dev_err(&plf->dev, "First irq not found, err %d\n",
+ irq_first);
+ return irq_first;
+ }
+
+ irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
+ if (irq_last < 0) {
+ dev_err(&plf->dev, "Last irq not found, err %d\n",
+ irq_last);
+ return irq_last;
+ }
+
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
if (!ab8500_dir)
- goto exit_no_debugfs;
+ goto err;
+
+ ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
+ ab8500_dir);
+ if (!ab8500_gpadc_dir)
+ goto err;
+
+ file = debugfs_create_file("all-bank-registers", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-address", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-value", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+ if (!file)
+ goto err;
- ab8500_reg_file = debugfs_create_file("all-bank-registers",
- S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
- if (!ab8500_reg_file)
- goto exit_destroy_dir;
+ file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ if (!file)
+ goto err;
- ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!ab8500_bank_file)
- goto exit_destroy_reg;
+ file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+ if (!file)
+ goto err;
- ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
- &ab8500_address_fops);
- if (!ab8500_address_file)
- goto exit_destroy_bank;
+ file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+ if (!file)
+ goto err;
- ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!ab8500_val_file)
- goto exit_destroy_address;
+ file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+ if (!file)
+ goto err;
return 0;
-exit_destroy_address:
- debugfs_remove(ab8500_address_file);
-exit_destroy_bank:
- debugfs_remove(ab8500_bank_file);
-exit_destroy_reg:
- debugfs_remove(ab8500_reg_file);
-exit_destroy_dir:
- debugfs_remove(ab8500_dir);
-exit_no_debugfs:
+err:
+ if (ab8500_dir)
+ debugfs_remove_recursive(ab8500_dir);
dev_err(&plf->dev, "failed to create debugfs entries.\n");
return -ENOMEM;
}
static int __devexit ab8500_debug_remove(struct platform_device *plf)
{
- debugfs_remove(ab8500_val_file);
- debugfs_remove(ab8500_address_file);
- debugfs_remove(ab8500_bank_file);
- debugfs_remove(ab8500_reg_file);
- debugfs_remove(ab8500_dir);
-
+ debugfs_remove_recursive(ab8500_dir);
return 0;
}
diff --git a/drivers/mfd/ab8500-denc.c b/drivers/mfd/ab8500-denc.c
new file mode 100644
index 00000000000..1bf61e41bcd
--- /dev/null
+++ b/drivers/mfd/ab8500-denc.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson AB8500 DENC base driver
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/denc-regs.h>
+#include <linux/mfd/ab8500/denc.h>
+
+#define AB8500_NAME "ab8500"
+#define AB8500_DENC_NAME "ab8500_denc"
+
+struct device_usage {
+ struct list_head list;
+ struct platform_device *pdev;
+ bool taken;
+};
+static LIST_HEAD(device_list);
+
+/* To get rid of the extra bank parameter: */
+#define AB8500_REG_BANK_NR(__reg) ((0xff00 & (__reg)) >> 8)
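+/* e.g. a register constant of 0x1014 resolves to bank 0x10, address 0x14 */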
+static inline u8 ab8500_rreg(struct device *dev, u32 reg)
+{
+ u8 val;
+ if (abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, &val) < 0)
+ return 0;
+ else
+ return val;
+}
+
+static inline int ab8500_wreg(struct device *dev, u32 reg, u8 val)
+{
+ return abx500_set_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, val);
+}
+
+/* Only use in the macro below: */
+static inline int _ab8500_wreg_fld(struct device *dev, u32 reg, u8 val,
+ u8 mask, u8 shift)
+{
+ int ret;
+ u8 org_val;
+
+ ret = abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, &org_val);
+ if (ret < 0)
+ return ret;
+ else
+ ab8500_wreg(dev, reg,
+ (org_val & ~mask) | ((val << shift) & mask));
+ return 0;
+}
+
+#define ab8500_wr_fld(__d, __reg, __fld, __val) \
+ _ab8500_wreg_fld(__d, __reg, __val, __reg##_##__fld##_MASK, \
+ __reg##_##__fld##_SHIFT)
+
+#define ab8500_set_fld(__cur_val, __reg, __fld, __val) \
+ (((__cur_val) & ~__reg##_##__fld##_MASK) | \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK))
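+/*
+ * e.g. ab8500_wr_fld(&pdev->dev, AB8500_DENC_CONF6, SOFT_RESET, 1) performs a
+ * read-modify-write of the SOFT_RESET field using the field's _MASK and
+ * _SHIFT register definitions.
+ */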
+
+#define AB8500_DENC_TRACE(__pd) dev_dbg(&(__pd)->dev, "%s\n", __func__)
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_ab8500_denc_dir;
+static struct dentry *debugfs_ab8500_dump_regs_file;
+static void ab8500_denc_conf_ddr(struct platform_device *pdev);
+static int debugfs_ab8500_open_file(struct inode *inode, struct file *file);
+static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_ab8500_dump_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_ab8500_open_file,
+ .read = debugfs_ab8500_dump_regs,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static int __devinit ab8500_denc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_platform_data *ab8500_pdata =
+ dev_get_platdata(pdev->dev.parent);
+ struct ab8500_denc_platform_data *pdata;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+
+ if (ab8500_pdata == NULL) {
+ dev_err(&pdev->dev, "AB8500 platform data missing\n");
+ return -EINVAL;
+ }
+
+ pdata = ab8500_pdata->denc;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Denc platform data missing\n");
+ return -EINVAL;
+ }
+
+ device_data = kzalloc(sizeof(struct device_usage), GFP_KERNEL);
+ if (!device_data) {
+ dev_err(&pdev->dev, "Failed to allocate device data\n");
+ return -ENOMEM;
+ }
+ device_data->pdev = pdev;
+ list_add_tail(&device_data->list, &device_list);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_ab8500_denc_dir = debugfs_create_dir(pdev->name, NULL);
+ debugfs_ab8500_dump_regs_file = debugfs_create_file(
+ "dumpregs", S_IRUGO,
+ debugfs_ab8500_denc_dir, &pdev->dev,
+ &debugfs_ab8500_dump_regs_fops
+ );
+#endif /* CONFIG_DEBUG_FS */
+ return ret;
+}
+
+static int __devexit ab8500_denc_remove(struct platform_device *pdev)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(debugfs_ab8500_dump_regs_file);
+ debugfs_remove(debugfs_ab8500_denc_dir);
+#endif /* CONFIG_DEBUG_FS */
+
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (device_data->pdev == pdev) {
+ list_del(element);
+ kzfree(device_data);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver ab8500_denc_driver = {
+ .probe = ab8500_denc_probe,
+ .remove = ab8500_denc_remove,
+ .driver = {
+ .name = "ab8500-denc",
+ },
+};
+
+static void setup_27mhz(struct platform_device *pdev, bool enable)
+{
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF);
+
+ AB8500_DENC_TRACE(pdev);
+ /* TODO: check if this field needs to be set */
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_PD_ENA,
+ true);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_BUF_ENA,
+ enable);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_INV,
+ false);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_DE_IN,
+ false);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_STRE,
+ 1);
+ ab8500_wreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF, data);
+
+ data = ab8500_rreg(&pdev->dev, AB8500_SYS_CLK_CTRL);
+ data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_CLK_VALID,
+ enable);
+ data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_PLL_ENA,
+ enable);
+ ab8500_wreg(&pdev->dev, AB8500_SYS_CLK_CTRL, data);
+}
+
+static u32 map_tv_std(enum ab8500_denc_TV_std std)
+{
+ switch (std) {
+ case TV_STD_PAL_BDGHI:
+ return AB8500_DENC_CONF0_STD_PAL_BDGHI;
+ case TV_STD_PAL_N:
+ return AB8500_DENC_CONF0_STD_PAL_N;
+ case TV_STD_PAL_M:
+ return AB8500_DENC_CONF0_STD_PAL_M;
+ case TV_STD_NTSC_M:
+ return AB8500_DENC_CONF0_STD_NTSC_M;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_cr_filter(enum ab8500_denc_cr_filter_bandwidth bw)
+{
+ switch (bw) {
+ case TV_CR_NTSC_LOW_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_1MHZ;
+ case TV_CR_PAL_LOW_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_3MHZ;
+ case TV_CR_NTSC_HIGH_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_6MHZ;
+ case TV_CR_PAL_HIGH_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_9MHZ;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_phase_rst_mode(enum ab8500_denc_phase_reset_mode mode)
+{
+ switch (mode) {
+ case TV_PHASE_RST_MOD_DISABLE:
+ return AB8500_DENC_CONF8_PH_RST_MODE_DISABLED;
+ case TV_PHASE_RST_MOD_FROM_PHASE_BUF:
+ return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_PHASE_BUF;
+ case TV_PHASE_RST_MOD_FROM_INC_DFS:
+ return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_INC_DFS;
+ case TV_PHASE_RST_MOD_RST:
+ return AB8500_DENC_CONF8_PH_RST_MODE_RESET;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_plug_time(enum ab8500_denc_plug_time time)
+{
+ switch (time) {
+ case TV_PLUG_TIME_0_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_0_5S;
+ case TV_PLUG_TIME_1S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1S;
+ case TV_PLUG_TIME_1_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1_5S;
+ case TV_PLUG_TIME_2S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2S;
+ case TV_PLUG_TIME_2_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2_5S;
+ case TV_PLUG_TIME_3S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_3S;
+ default:
+ return 0;
+ }
+}
+
+struct platform_device *ab8500_denc_get_device(void)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ pr_debug("%s\n", __func__);
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (!device_data->taken) {
+ device_data->taken = true;
+ return device_data->pdev;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(ab8500_denc_get_device);
+
+void ab8500_denc_put_device(struct platform_device *pdev)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (device_data->pdev == pdev)
+ device_data->taken = false;
+ }
+}
+EXPORT_SYMBOL(ab8500_denc_put_device);
+
+void ab8500_denc_reset(struct platform_device *pdev, bool hard)
+{
+ AB8500_DENC_TRACE(pdev);
+ if (hard) {
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_CTRL3);
+ /* reset start */
+ ab8500_wreg(&pdev->dev, AB8500_CTRL3,
+ ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 0)
+ );
+ /* reset done */
+ ab8500_wreg(&pdev->dev, AB8500_CTRL3,
+ ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 1)
+ );
+ } else {
+ ab8500_wr_fld(&pdev->dev, AB8500_DENC_CONF6, SOFT_RESET, 1);
+ mdelay(10);
+ }
+}
+EXPORT_SYMBOL(ab8500_denc_reset);
+
+void ab8500_denc_power_up(struct platform_device *pdev)
+{
+ setup_27mhz(pdev, true);
+}
+EXPORT_SYMBOL(ab8500_denc_power_up);
+
+void ab8500_denc_power_down(struct platform_device *pdev)
+{
+ setup_27mhz(pdev, false);
+}
+EXPORT_SYMBOL(ab8500_denc_power_down);
+
+void ab8500_denc_conf(struct platform_device *pdev,
+ struct ab8500_denc_conf *conf)
+{
+ u8 data;
+
+ AB8500_DENC_TRACE(pdev);
+
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF0,
+ AB8500_VAL2REG(AB8500_DENC_CONF0, STD, map_tv_std(conf->TV_std))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF0, SYNC,
+ conf->test_pattern ? AB8500_DENC_CONF0_SYNC_AUTO_TEST :
+ AB8500_DENC_CONF0_SYNC_F_BASED_SLAVE
+ )
+ );
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF1,
+ AB8500_VAL2REG(AB8500_DENC_CONF1, BLK_LI,
+ !conf->partial_blanking)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, FLT,
+ map_cr_filter(conf->cr_filter))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CO_KI, conf->suppress_col)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, SETUP_MAIN,
+ conf->black_level_setup)
+ /* TODO: handle cc field: set to 0 now */
+ );
+
+ data = ab8500_rreg(&pdev->dev, AB8500_DENC_CONF2);
+ data = ab8500_set_fld(data, AB8500_DENC_CONF2, N_INTRL,
+ conf->progressive);
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF2, data);
+
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF8,
+ AB8500_VAL2REG(AB8500_DENC_CONF8, PH_RST_MODE,
+ map_phase_rst_mode(conf->phase_reset_mode))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF8, VAL_422_MUX,
+ conf->act_output)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF8, BLK_ALL,
+ conf->blank_all)
+ );
+ data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL0,
+ conf->dac_enable);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL1,
+ conf->act_dc_output);
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data);
+
+ /* no support for DDR in early versions */
+ if (AB8500_REG2VAL(AB8500_REV, FULL_MASK,
+ ab8500_rreg(&pdev->dev, AB8500_REV)) > 0)
+ ab8500_denc_conf_ddr(pdev);
+}
+EXPORT_SYMBOL(ab8500_denc_conf);
+
+void ab8500_denc_conf_plug_detect(struct platform_device *pdev,
+ bool enable, bool load_RC,
+ enum ab8500_denc_plug_time time)
+{
+ u8 data;
+
+ AB8500_DENC_TRACE(pdev);
+ data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_PLUG_ON, enable);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_LOAD_RC, load_RC);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, PLUG_TV_TIME,
+ map_plug_time(time));
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data);
+}
+EXPORT_SYMBOL(ab8500_denc_conf_plug_detect);
+
+void ab8500_denc_mask_int_plug_det(struct platform_device *pdev, bool plug,
+ bool unplug)
+{
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_IT_MASK1);
+
+ AB8500_DENC_TRACE(pdev);
+ data = ab8500_set_fld(data, AB8500_IT_MASK1, PLUG_TV_DET, plug);
+ data = ab8500_set_fld(data, AB8500_IT_MASK1, UNPLUG_TV_DET, unplug);
+ ab8500_wreg(&pdev->dev, AB8500_IT_MASK1, data);
+}
+EXPORT_SYMBOL(ab8500_denc_mask_int_plug_det);
+
+static void ab8500_denc_conf_ddr(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *core_pdata;
+ struct ab8500_denc_platform_data *denc_pdata;
+
+ AB8500_DENC_TRACE(pdev);
+ core_pdata = dev_get_platdata(pdev->dev.parent);
+ denc_pdata = core_pdata->denc;
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL2,
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2,
+ DENC_DDR, denc_pdata->ddr_enable) |
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, SWAP_DDR_DATA_IN,
+ denc_pdata->ddr_little_endian));
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int debugfs_ab8500_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUG_BUF_SIZE 900
+
+#define AB8500_GPIO_DIR5 0x1014
+#define AB8500_GPIO_DIR5_35_SHIFT 2
+#define AB8500_GPIO_DIR5_35_MASK (1 << AB8500_GPIO_DIR5_35_SHIFT)
+#define AB8500_GPIO_OUT5 0x1024
+#define AB8500_GPIO_OUT5_35_SHIFT 2
+#define AB8500_GPIO_OUT5_35_MASK (1 << AB8500_GPIO_OUT5_35_SHIFT)
+#define AB8500_GPIO_OUT5_35_VIDEO 0
+#define AB8500_GPIO_OUT5_35_AUDIO 1
+#define AB8500_GPIO_NPUD5 0x1034
+#define AB8500_GPIO_NPUD5_35_SHIFT 2
+#define AB8500_GPIO_NPUD5_35_MASK (1 << AB8500_GPIO_NPUD5_35_SHIFT)
+#define AB8500_GPIO_NPUD5_35_ACTIVE 0
+#define AB8500_GPIO_NPUD5_35_INACTIVE 1
+
+static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int ret = 0;
+ size_t data_size = 0;
+ char buffer[DEBUG_BUF_SIZE];
+ struct device *dev = file->private_data;
+
+ data_size += snprintf(buffer + data_size, DEBUG_BUF_SIZE - data_size,
+ "AB8500 DENC registers:\n"
+ "------Regulators etc ----------\n"
+ "CTRL3 : 0x%04x = 0x%02x\n"
+ "SYSULPCLK_CONF: 0x%04x = 0x%02x\n"
+ "SYSCLK_CTRL : 0x%04x = 0x%02x\n"
+ "REGU_MISC1 : 0x%04x = 0x%02x\n"
+ "VAUX12_REGU : 0x%04x = 0x%02x\n"
+ "VAUX1_SEL1 : 0x%04x = 0x%02x\n"
+ "------TVout only --------------\n"
+ "DENC_CONF0 : 0x%04x = 0x%02x\n"
+ "DENC_CONF1 : 0x%04x = 0x%02x\n"
+ "DENC_CONF2 : 0x%04x = 0x%02x\n"
+ "DENC_CONF6 : 0x%04x = 0x%02x\n"
+ "DENC_CONF8 : 0x%04x = 0x%02x\n"
+ "TVOUT_CTRL : 0x%04x = 0x%02x\n"
+ "TVOUT_CTRL2 : 0x%04x = 0x%02x\n"
+ "IT_MASK1 : 0x%04x = 0x%02x\n"
+ "------AV connector-------------\n"
+ "GPIO_DIR5 : 0x%04x = 0x%02x\n"
+ "GPIO_OUT5 : 0x%04x = 0x%02x\n"
+ "GPIO_NPUD5 : 0x%04x = 0x%02x\n"
+ ,
+ AB8500_CTRL3, ab8500_rreg(dev, AB8500_CTRL3),
+ AB8500_SYS_ULP_CLK_CONF, ab8500_rreg(dev,
+ AB8500_SYS_ULP_CLK_CONF),
+ AB8500_SYS_CLK_CTRL, ab8500_rreg(dev, AB8500_SYS_CLK_CTRL),
+ AB8500_REGU_MISC1, ab8500_rreg(dev, AB8500_REGU_MISC1),
+ AB8500_VAUX12_REGU, ab8500_rreg(dev, AB8500_VAUX12_REGU),
+ AB8500_VAUX1_SEL, ab8500_rreg(dev, AB8500_VAUX1_SEL),
+ AB8500_DENC_CONF0, ab8500_rreg(dev, AB8500_DENC_CONF0),
+ AB8500_DENC_CONF1, ab8500_rreg(dev, AB8500_DENC_CONF1),
+ AB8500_DENC_CONF2, ab8500_rreg(dev, AB8500_DENC_CONF2),
+ AB8500_DENC_CONF6, ab8500_rreg(dev, AB8500_DENC_CONF6),
+ AB8500_DENC_CONF8, ab8500_rreg(dev, AB8500_DENC_CONF8),
+ AB8500_TVOUT_CTRL, ab8500_rreg(dev, AB8500_TVOUT_CTRL),
+ AB8500_TVOUT_CTRL2, ab8500_rreg(dev, AB8500_TVOUT_CTRL2),
+ AB8500_IT_MASK1, ab8500_rreg(dev, AB8500_IT_MASK1),
+ AB8500_GPIO_DIR5, ab8500_rreg(dev, AB8500_GPIO_DIR5),
+ AB8500_GPIO_OUT5, ab8500_rreg(dev, AB8500_GPIO_OUT5),
+ AB8500_GPIO_NPUD5, ab8500_rreg(dev, AB8500_GPIO_NPUD5)
+ );
+ if (data_size >= DEBUG_BUF_SIZE) {
+ printk(KERN_ERR "AB8500 DENC: debug buffer too small\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* check if read done */
+ if (*f_pos > data_size)
+ goto out;
+
+ if (*f_pos + count > data_size)
+ count = data_size - *f_pos;
+
+ if (copy_to_user(buf, buffer + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+out:
+ return ret;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module init */
+static int __init ab8500_denc_init(void)
+{
+ return platform_driver_register(&ab8500_denc_driver);
+}
+module_init(ab8500_denc_init);
+
+static void __exit ab8500_denc_exit(void)
+{
+ platform_driver_unregister(&ab8500_denc_driver);
+}
+module_exit(ab8500_denc_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson AB8500 DENC driver");
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index e985d1701a8..b6f1e80209a 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -130,16 +130,12 @@ static LIST_HEAD(ab8500_gpadc_list);
* ab8500_gpadc_get() - returns a reference to the primary AB8500 GPADC
* (i.e. the first GPADC in the instance list)
*/
-struct ab8500_gpadc *ab8500_gpadc_get(char *name)
+struct ab8500_gpadc *ab8500_gpadc_get(void)
{
struct ab8500_gpadc *gpadc;
+ gpadc = list_first_entry(&ab8500_gpadc_list, struct ab8500_gpadc, node);
- list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
- if (!strcmp(name, dev_name(gpadc->dev)))
- return gpadc;
- }
-
- return ERR_PTR(-ENOENT);
+ return gpadc;
}
EXPORT_SYMBOL(ab8500_gpadc_get);
@@ -344,7 +340,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
* Delay might be needed for AB8500 cut 3.0; if not, remove it
* when hardware is available
*/
- msleep(1);
+ mdelay(1);
break;
}
/* Intentional fallthrough */
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 9be541c6b00..3a94ec80192 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/ab8500.h>
-#include <linux/mfd/db8500-prcmu.h>
+#include <linux/mfd/dbx500-prcmu.h>
static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
{
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index f20feefac19..d159dbf93eb 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,12 +7,114 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/signal.h>
+#include <linux/power_supply.h>
#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/ab8500/sysctrl.h>
+#include <linux/time.h>
+#include <linux/hwmon.h>
static struct device *sysctrl_dev;
+void ab8500_power_off(void)
+{
+ struct ab8500_platform_data *plat;
+ struct timespec ts;
+ sigset_t old;
+ sigset_t all;
+ static char *pss[] = {"ab8500_ac", "ab8500_usb"};
+ int i;
+ bool charger_present = false;
+ union power_supply_propval val;
+ struct power_supply *psy;
+ int ret;
+
+ /*
+ * If we have a charger connected and we're powering off,
+ * reboot into charge-only mode.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(pss); i++) {
+ psy = power_supply_get_by_name(pss[i]);
+ if (!psy)
+ continue;
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
+
+ if (!ret && val.intval) {
+ charger_present = true;
+ break;
+ }
+ }
+
+ if (!charger_present)
+ goto shutdown;
+
+ /* Check if battery is known */
+ psy = power_supply_get_by_name("ab8500_btemp");
+ if (psy) {
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY,
+ &val);
+ if (!ret && val.intval != POWER_SUPPLY_TECHNOLOGY_UNKNOWN) {
+ printk(KERN_INFO
+ "Charger \"%s\" is connected with known battery."
+ " Rebooting.\n",
+ pss[i]);
+ machine_restart("charging");
+ }
+ }
+
+shutdown:
+ sigfillset(&all);
+
+ plat = dev_get_platdata(sysctrl_dev->parent);
+ getnstimeofday(&ts);
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ if (ts.tv_sec == 0 ||
+ (ts.tv_sec - plat->thermal_set_time_sec >
+ plat->thermal_time_out))
+ plat->thermal_power_off_pending = false;
+ if (!plat->thermal_power_off_pending) {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ } else {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_THDB8500SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ }
+ }
+}
+
+static int ab8500_notifier_call(struct notifier_block *this,
+ unsigned long val, void *data)
+{
+ struct ab8500_platform_data *plat;
+ static struct timespec ts;
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ plat = dev_get_platdata(sysctrl_dev->parent);
+ if (val) {
+ getnstimeofday(&ts);
+ plat->thermal_set_time_sec = ts.tv_sec;
+ plat->thermal_power_off_pending = true;
+ } else {
+ plat->thermal_set_time_sec = 0;
+ plat->thermal_power_off_pending = false;
+ }
+ return 0;
+}
+
+static struct notifier_block ab8500_notifier = {
+ .notifier_call = ab8500_notifier_call,
+};
+
static inline bool valid_bank(u8 bank)
{
return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
@@ -51,7 +153,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
{
+ struct ab8500_platform_data *plat;
+
sysctrl_dev = &pdev->dev;
+ plat = dev_get_platdata(pdev->dev.parent);
+ if (plat->pm_power_off)
+ pm_power_off = ab8500_power_off;
+ hwmon_notifier_register(&ab8500_notifier);
return 0;
}
diff --git a/drivers/mfd/db5500-prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h
new file mode 100644
index 00000000000..e8aa2901478
--- /dev/null
+++ b/drivers/mfd/db5500-prcmu-regs.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __MACH_PRCMU_REGS_DB5500_H
+#define __MACH_PRCMU_REGS_DB5500_H
+
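+/*
+ * Mask covering bits _start through _end, inclusive;
+ * e.g. BITS(0, 7) == 0xFF and BITS(5, 7) == 0xE0.
+ */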
+#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
+
+#define PRCM_TCR 0x1C8
+#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
+#define PRCM_TCR_STOP_TIMERS BIT(16)
+#define PRCM_TCR_DOZE_MODE BIT(17)
+
+/* PRCMU HW semaphore */
+#define PRCM_SEM 0x400
+#define PRCM_SEM_PRCM_SEM BIT(0)
+
+#define DB5500_PRCM_ACLK_MGT 0x004
+#define DB5500_PRCM_SVACLK_MGT 0x008
+#define DB5500_PRCM_SIACLK_MGT 0x00C
+#define DB5500_PRCM_SGACLK_MGT 0x014
+#define DB5500_PRCM_UARTCLK_MGT 0x018
+#define DB5500_PRCM_MSP02CLK_MGT 0x01C
+#define DB5500_PRCM_I2CCLK_MGT 0x020
+#define DB5500_PRCM_SDMMCCLK_MGT 0x024
+#define DB5500_PRCM_PER1CLK_MGT 0x02C
+#define DB5500_PRCM_PER2CLK_MGT 0x030
+#define DB5500_PRCM_PER3CLK_MGT 0x034
+#define DB5500_PRCM_PER5CLK_MGT 0x038
+#define DB5500_PRCM_PER6CLK_MGT 0x03C
+#define DB5500_PRCM_IRDACLK_MGT 0x040
+#define DB5500_PRCM_PWMCLK_MGT 0x044
+#define DB5500_PRCM_SPARE1CLK_MGT 0x048
+#define DB5500_PRCM_IRRCCLK_MGT 0x04C
+#define DB5500_PRCM_HDMICLK_MGT 0x058
+#define DB5500_PRCM_APEATCLK_MGT 0x05C
+#define DB5500_PRCM_APETRACECLK_MGT 0x060
+#define DB5500_PRCM_MCDECLK_MGT 0x064
+#define DB5500_PRCM_DSIALTCLK_MGT 0x06C
+#define DB5500_PRCM_DMACLK_MGT 0x074
+#define DB5500_PRCM_B2R2CLK_MGT 0x078
+#define DB5500_PRCM_TVCLK_MGT 0x07C
+#define DB5500_PRCM_RNGCLK_MGT 0x284
+
+#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
+#define PRCM_CLK_MGT_CLKPLLDIV_SHIFT 0
+#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
+#define PRCM_CLK_MGT_CLKEN BIT(8)
+
+#define PRCM_ARM_IT1_CLEAR 0x48C
+#define PRCM_ARM_IT1_VAL 0x494
+
+/* CPU mailbox registers */
+#define PRCM_MBOX_CPU_VAL 0x0FC
+#define PRCM_MBOX_CPU_SET 0x100
+
+/* System reset register */
+#define PRCM_APE_SOFTRST 0x228
+
+/* PRCMU clock/PLL/reset registers */
+#define PRCM_PLLDSI_FREQ 0x500
+#define PRCM_PLLDSI_ENABLE 0x504
+#define PRCM_PLLDSI_LOCKP 0x508
+#define PRCM_DSI_PLLOUT_SEL 0x530
+#define PRCM_DSITVCLK_DIV 0x52C
+#define PRCM_APE_RESETN_SET 0x1E4
+#define PRCM_APE_RESETN_CLR 0x1E8
+
+/* CLKOUTx SEL0 settings */
+#define CLKOUT_SEL0_REF_CLK 0x01 /* 0b 0001 */
+#define CLKOUT_SEL0_RTC_CLK0 0x02 /* 0b 0010 */
+#define CLKOUT_SEL0_ULP_CLK 0x04 /* 0b 0100 */
+#define CLKOUT_SEL0_SEL_CLK 0x08 /* 0b 1000 */
+
+/* CLKOUTx SEL settings */
+#define CLKOUT_SEL_STATIC0 0x0001 /* 0b 00 0000 0001 */
+#define CLKOUT_SEL_REFCLK 0x0002 /* 0b 00 0000 0010 */
+#define CLKOUT_SEL_ULPCLK 0x0004 /* 0b 00 0000 0100 */
+#define CLKOUT_SEL_ARMCLK 0x0008 /* 0b 00 0000 1000 */
+#define CLKOUT_SEL_SYSACC0CLK 0x0010 /* 0b 00 0001 0000 */
+#define CLKOUT_SEL_SOC0PLLCLK 0x0020 /* 0b 00 0010 0000 */
+#define CLKOUT_SEL_SOC1PLLCLK 0x0040 /* 0b 00 0100 0000 */
+#define CLKOUT_SEL_DDRPLLCLK 0x0080 /* 0b 00 1000 0000 */
+#define CLKOUT_SEL_TVCLK 0x0100 /* 0b 01 0000 0000 */
+#define CLKOUT_SEL_IRDACLK 0x0200 /* 0b 10 0000 0000 */
+
+/* CLKOUTx dividers */
+#define CLKOUT_DIV_2 0x00 /* 0b 000 */
+#define CLKOUT_DIV_4 0x01 /* 0b 001 */
+#define CLKOUT_DIV_8 0x02 /* 0b 010 */
+#define CLKOUT_DIV_16 0x03 /* 0b 011 */
+#define CLKOUT_DIV_32 0x04 /* 0b 100 */
+#define CLKOUT_DIV_64 0x05 /* 0b 101 */
+/* Values 0x06 and 0x07 will also set the CLKOUTx divider to 64. */
+
+/* PRCM_CLKOCR CLKOUTx Control registers */
+#define PRCM_CLKOCR 0x1CC
+#define PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT 0
+#define PRCM_CLKOCR_CLKOUT0_SEL0_MASK BITS(0, 3)
+#define PRCM_CLKOCR_CLKOUT0_SEL_SHIFT 4
+#define PRCM_CLKOCR_CLKOUT0_SEL_MASK BITS(4, 13)
+#define PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT 16
+#define PRCM_CLKOCR_CLKOUT1_SEL0_MASK BITS(16, 19)
+#define PRCM_CLKOCR_CLKOUT1_SEL_SHIFT 20
+#define PRCM_CLKOCR_CLKOUT1_SEL_MASK BITS(20, 29)
+
+/* PRCM_CLKODIV CLKOUTx Dividers */
+#define PRCM_CLKODIV 0x188
+#define PRCM_CLKODIV_CLKOUT0_DIV_SHIFT 0
+#define PRCM_CLKODIV_CLKOUT0_DIV_MASK BITS(0, 2)
+#define PRCM_CLKODIV_CLKOUT1_DIV_SHIFT 16
+#define PRCM_CLKODIV_CLKOUT1_DIV_MASK BITS(16, 18)
+
+#define PRCM_MMIP_LS_CLAMP_SET 0x420
+#define PRCM_MMIP_LS_CLAMP_CLR 0x424
+#define PRCM_DDR_SUBSYS_APE_MINBW 0x438
+
+/* Miscellaneous unit registers */
+#define PRCM_DSI_SW_RESET 0x324
+#define PRCM_RESOUTN_SET_OFFSET 0x214
+#define PRCM_RESOUTN_CLR_OFFSET 0x218
+
+#endif
diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c
index bb115b2f04e..aee5ac397f9 100644
--- a/drivers/mfd/db5500-prcmu.c
+++ b/drivers/mfd/db5500-prcmu.c
@@ -19,12 +19,21 @@
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/regulator/db5500-prcmu.h>
+#include <linux/regulator/machine.h>
#include <linux/interrupt.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db5500-regs.h>
-#include "dbx500-prcmu-regs.h"
+#include <mach/prcmu-debug.h>
+
+#include "db5500-prcmu-regs.h"
+
+#define PRCMU_FW_VERSION_OFFSET 0xA4
+#define PRCM_SW_RST_REASON (tcdm_base + 0xFF8) /* 2 bytes */
#define _PRCM_MB_HEADER (tcdm_base + 0xFE8)
#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0)
@@ -64,6 +73,52 @@
#define PRCM_ACK_MB6 (tcdm_base + 0xF0C)
#define PRCM_ACK_MB7 (tcdm_base + 0xF08)
+/* Share info */
+#define PRCM_SHARE_INFO (tcdm_base + 0xEC8)
+
+#define PRCM_SHARE_INFO_HOTDOG (PRCM_SHARE_INFO + 62)
+
+/* Mailbox 0 REQs */
+#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
+#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x1)
+#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x2)
+#define PRCM_REQ_MB0_DDR_STATE (PRCM_REQ_MB0 + 0x3)
+#define PRCM_REQ_MB0_ESRAM0_STATE (PRCM_REQ_MB0 + 0x4)
+#define PRCM_REQ_MB0_WAKEUP_DBB (PRCM_REQ_MB0 + 0x8)
+#define PRCM_REQ_MB0_WAKEUP_ABB (PRCM_REQ_MB0 + 0xC)
+
+/* Mailbox 0 ACKs */
+#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
+#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
+#define PRCM_ACK_MB0_WAKEUP_0_DBB (PRCM_ACK_MB0 + 0x4)
+#define PRCM_ACK_MB0_WAKEUP_0_ABB (PRCM_ACK_MB0 + 0x8)
+#define PRCM_ACK_MB0_WAKEUP_1_DBB (PRCM_ACK_MB0 + 0x28)
+#define PRCM_ACK_MB0_WAKEUP_1_ABB (PRCM_ACK_MB0 + 0x2C)
+#define PRCM_ACK_MB0_EVENT_ABB_NUMBERS 20
+
+/* Request mailbox 1 fields. */
+#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
+#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
+
+/* Mailbox 1 ACKs */
+#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
+#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
+#define PRCM_ACK_MB1_ARM_VOLT_STATUS (PRCM_ACK_MB1 + 0x2)
+#define PRCM_ACK_MB1_APE_VOLT_STATUS (PRCM_ACK_MB1 + 0x3)
+
+/* Mailbox 2 REQs */
+#define PRCM_REQ_MB2_EPOD_CLIENT (PRCM_REQ_MB2 + 0x0)
+#define PRCM_REQ_MB2_EPOD_STATE (PRCM_REQ_MB2 + 0x1)
+#define PRCM_REQ_MB2_CLK_CLIENT (PRCM_REQ_MB2 + 0x2)
+#define PRCM_REQ_MB2_CLK_STATE (PRCM_REQ_MB2 + 0x3)
+#define PRCM_REQ_MB2_PLL_CLIENT (PRCM_REQ_MB2 + 0x4)
+#define PRCM_REQ_MB2_PLL_STATE (PRCM_REQ_MB2 + 0x5)
+
+/* Mailbox 2 ACKs */
+#define PRCM_ACK_MB2_EPOD_STATUS (PRCM_ACK_MB2 + 0x2)
+#define PRCM_ACK_MB2_CLK_STATUS (PRCM_ACK_MB2 + 0x6)
+#define PRCM_ACK_MB2_PLL_STATUS (PRCM_ACK_MB2 + 0xA)
+
enum mb_return_code {
RC_SUCCESS,
RC_FAIL,
@@ -71,12 +126,50 @@ enum mb_return_code {
/* Mailbox 0 headers. */
enum mb0_header {
- /* request */
- RMB0H_PWR_STATE_TRANS = 1,
- RMB0H_WAKE_UP_CFG,
- RMB0H_RD_WAKE_UP_ACK,
/* acknowledge */
- AMB0H_WAKE_UP = 1,
+ MB0H_WAKE_UP = 0,
+ /* request */
+ MB0H_PWR_STATE_TRANS,
+ MB0H_WAKE_UP_CFG,
+ MB0H_RD_WAKE_UP_ACK,
+};
+
+/* Mailbox 1 headers.*/
+enum mb1_header {
+ MB1H_ARM_OPP = 1,
+ MB1H_APE_OPP,
+ MB1H_ARM_APE_OPP,
+};
+
+/* Mailbox 2 headers. */
+enum mb2_header {
+ MB2H_EPOD_REQUEST = 1,
+ MB2H_CLK_REQUEST,
+ MB2H_PLL_REQUEST,
+};
+
+/* Mailbox 3 headers. */
+enum mb3_header {
+ MB3H_REFCLK_REQUEST = 1,
+};
+
+enum sysclk_state {
+ SYSCLK_OFF,
+ SYSCLK_ON,
+};
+
+/* Mailbox 4 headers */
+enum mb4_header {
+ MB4H_CFG_HOTDOG = 7,
+ MB4H_CFG_HOTMON = 8,
+ MB4H_CFG_HOTPERIOD = 10,
+};
+
+/* Mailbox 4 ACK headers */
+enum mb4_ack_header {
+ MB4H_ACK_CFG_HOTDOG = 5,
+ MB4H_ACK_CFG_HOTMON = 6,
+ MB4H_ACK_CFG_HOTPERIOD = 8,
};
/* Mailbox 5 headers. */
@@ -85,6 +178,69 @@ enum mb5_header {
MB5H_I2C_READ,
};
+enum db5500_arm_opp {
+ DB5500_ARM_100_OPP = 1,
+ DB5500_ARM_50_OPP,
+ DB5500_ARM_EXT_OPP,
+};
+
+enum db5500_ape_opp {
+ DB5500_APE_100_OPP = 1,
+ DB5500_APE_50_OPP
+};
+
+enum epod_state {
+ EPOD_OFF,
+ EPOD_ON,
+};
+enum epod_onoffret_state {
+ EPOD_OOR_OFF,
+ EPOD_OOR_RET,
+ EPOD_OOR_ON,
+};
+enum db5500_prcmu_pll {
+ DB5500_PLL_SOC0,
+ DB5500_PLL_SOC1,
+ DB5500_PLL_DDR,
+ DB5500_NUM_PLL_ID,
+};
+
+enum db5500_prcmu_clk {
+ DB5500_MSP1CLK,
+ DB5500_CDCLK,
+ DB5500_IRDACLK,
+ DB5500_TVCLK,
+ DB5500_NUM_CLK_CLIENTS,
+};
+
+enum on_off_ret {
+ OFF_ST,
+ RET_ST,
+ ON_ST,
+};
+
+enum db5500_ap_pwr_state {
+ DB5500_AP_SLEEP = 2,
+ DB5500_AP_DEEP_SLEEP,
+ DB5500_AP_IDLE,
+};
+
+/* Request mailbox 3 fields */
+#define PRCM_REQ_MB3_REFCLK_MGT (PRCM_REQ_MB3 + 0x0)
+
+/* Ack. mailbox 3 fields */
+#define PRCM_ACK_MB3_REFCLK_REQ (PRCM_ACK_MB3 + 0x0)
+
+
+/* Request mailbox 4 fields */
+#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 32)
+#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 34)
+#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 36)
+#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 38)
+
+/* Ack. mailbox 4 field */
+#define PRCM_ACK_MB4_REQUESTS (PRCM_ACK_MB4 + 0x0)
+
/* Request mailbox 5 fields. */
#define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1)
@@ -125,13 +281,174 @@ enum mb5_header {
#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
/*
+ * Wakeups/IRQs
+ */
+
+#define WAKEUP_BIT_RTC BIT(0)
+#define WAKEUP_BIT_RTT0 BIT(1)
+#define WAKEUP_BIT_RTT1 BIT(2)
+#define WAKEUP_BIT_CD_IRQ BIT(3)
+#define WAKEUP_BIT_SRP_TIM BIT(4)
+#define WAKEUP_BIT_APE_REQ BIT(5)
+#define WAKEUP_BIT_USB BIT(6)
+#define WAKEUP_BIT_ABB BIT(7)
+#define WAKEUP_BIT_LOW_POWER_AUDIO BIT(8)
+#define WAKEUP_BIT_TEMP_SENSOR_LOW BIT(9)
+#define WAKEUP_BIT_ARM BIT(10)
+#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
+#define WAKEUP_BIT_TEMP_SENSOR_HIGH BIT(12)
+#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
+#define WAKEUP_BIT_GPIO0 BIT(23)
+#define WAKEUP_BIT_GPIO1 BIT(24)
+#define WAKEUP_BIT_GPIO2 BIT(25)
+#define WAKEUP_BIT_GPIO3 BIT(26)
+#define WAKEUP_BIT_GPIO4 BIT(27)
+#define WAKEUP_BIT_GPIO5 BIT(28)
+#define WAKEUP_BIT_GPIO6 BIT(29)
+#define WAKEUP_BIT_GPIO7 BIT(30)
+#define WAKEUP_BIT_AC_REL_ACK BIT(30)
+
+/*
+ * This vector maps irq numbers to the bits in the bit field used in
+ * communication with the PRCMU firmware.
+ *
+ * The reason for having this is to keep the irq numbers contiguous even though
+ * the bits in the bit field are not. (The bits also have a tendency to move
+ * around, to further complicate matters.)
+ */
+#define IRQ_INDEX(_name) ((IRQ_DB5500_PRCMU_##_name) - IRQ_DB5500_PRCMU_BASE)
+#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
+static u32 prcmu_irq_bit[NUM_DB5500_PRCMU_WAKEUPS] = {
+ IRQ_ENTRY(RTC),
+ IRQ_ENTRY(RTT0),
+ IRQ_ENTRY(RTT1),
+ IRQ_ENTRY(CD_IRQ),
+ IRQ_ENTRY(SRP_TIM),
+ IRQ_ENTRY(APE_REQ),
+ IRQ_ENTRY(USB),
+ IRQ_ENTRY(ABB),
+ IRQ_ENTRY(LOW_POWER_AUDIO),
+ IRQ_ENTRY(TEMP_SENSOR_LOW),
+ IRQ_ENTRY(TEMP_SENSOR_HIGH),
+ IRQ_ENTRY(ARM),
+ IRQ_ENTRY(AC_WAKE_ACK),
+ IRQ_ENTRY(MODEM_SW_RESET_REQ),
+ IRQ_ENTRY(GPIO0),
+ IRQ_ENTRY(GPIO1),
+ IRQ_ENTRY(GPIO2),
+ IRQ_ENTRY(GPIO3),
+ IRQ_ENTRY(GPIO4),
+ IRQ_ENTRY(GPIO5),
+ IRQ_ENTRY(GPIO6),
+ IRQ_ENTRY(GPIO7),
+ IRQ_ENTRY(AC_REL_ACK),
+};
+
+#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
+#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
+static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
+ WAKEUP_ENTRY(RTC),
+ WAKEUP_ENTRY(RTT0),
+ WAKEUP_ENTRY(RTT1),
+ WAKEUP_ENTRY(CD_IRQ),
+ WAKEUP_ENTRY(USB),
+ WAKEUP_ENTRY(ABB),
+ WAKEUP_ENTRY(ARM)
+};
+
+/*
* mb0_transfer - state needed for mailbox 0 communication.
- * @lock: The transaction lock.
+ * @lock: The transaction lock.
+ * @dbb_irqs_lock: Lock used for (un)masking DBB wakeup interrupts.
+ * @mask_work: Work structure used for (un)masking wakeup interrupts.
+ * @req: Request data that need to persist between requests.
*/
static struct {
spinlock_t lock;
+ spinlock_t dbb_irqs_lock;
+ struct work_struct mask_work;
+ struct {
+ u32 dbb_irqs;
+ u32 dbb_wakeups;
+ u32 abb_events;
+ } req;
} mb0_transfer;
+
+/*
+ * mb1_transfer - state needed for mailbox 1 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @req_arm_opp: Requested ARM OPP.
+ * @req_ape_opp: Requested APE OPP.
+ * @ack: Reply ("acknowledge") data.
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ u8 req_arm_opp;
+ u8 req_ape_opp;
+ struct {
+ u8 header;
+ u8 arm_opp;
+ u8 ape_opp;
+ u8 arm_voltage_st;
+ u8 ape_voltage_st;
+ } ack;
+} mb1_transfer;
+
+/*
+ * mb2_transfer - state needed for mailbox 2 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @req: Request data that need to persist between requests.
+ * @ack: Reply ("acknowledge") data.
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ struct {
+ u8 epod_st[DB5500_NUM_EPOD_ID];
+ u8 pll_st[DB5500_NUM_PLL_ID];
+ } req;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb2_transfer;
+
+/*
+ * mb3_transfer - state needed for mailbox 3 communication.
+ * @sysclk_lock: A lock used to handle concurrent sysclk requests.
+ * @sysclk_work: Work structure used for sysclk requests.
+ * @req_st: Requested clock state.
+ * @ack: Acknowledgement data
+ */
+static struct {
+ struct mutex sysclk_lock;
+ struct completion sysclk_work;
+ enum sysclk_state req_st;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb3_transfer;
+
+/*
+ * mb4_transfer - state needed for mailbox 4 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @ack: Acknowledgement data
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb4_transfer;
+
/*
* mb5_transfer - state needed for mailbox 5 communication.
* @lock: The transaction lock.
@@ -148,9 +465,612 @@ static struct {
} ack;
} mb5_transfer;
+/* Spinlocks */
+static DEFINE_SPINLOCK(clkout_lock);
+
/* PRCMU TCDM base IO address. */
static __iomem void *tcdm_base;
+struct clk_mgt {
+ unsigned int offset;
+ u32 pllsw;
+ u32 div;
+ bool scalable;
+ bool force50;
+};
+
+/* PRCMU Firmware Details */
+static struct {
+ u16 board;
+ u8 fw_version;
+ u8 api_version;
+} prcmu_version;
+
+static DEFINE_SPINLOCK(clk_mgt_lock);
+
+#define CLK_MGT_ENTRY(_name, _scalable)[PRCMU_##_name] = { \
+ .offset = DB5500_PRCM_##_name##_MGT, \
+ .scalable = _scalable, \
+}
+
+static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
+ CLK_MGT_ENTRY(SGACLK, true),
+ CLK_MGT_ENTRY(UARTCLK, false),
+ CLK_MGT_ENTRY(MSP02CLK, false),
+ CLK_MGT_ENTRY(I2CCLK, false),
+ [PRCMU_SDMMCCLK] = {
+ .offset = DB5500_PRCM_SDMMCCLK_MGT,
+ .force50 = true,
+ .scalable = false,
+ },
+ [PRCMU_SPARE1CLK] = {
+ .offset = DB5500_PRCM_SPARE1CLK_MGT,
+ .force50 = true,
+ .scalable = false,
+ },
+ CLK_MGT_ENTRY(PER1CLK, false),
+ CLK_MGT_ENTRY(PER2CLK, true),
+ CLK_MGT_ENTRY(PER3CLK, true),
+ CLK_MGT_ENTRY(PER5CLK, false), /* used for SPI */
+ CLK_MGT_ENTRY(PER6CLK, true),
+ CLK_MGT_ENTRY(PWMCLK, false),
+ CLK_MGT_ENTRY(IRDACLK, false),
+ CLK_MGT_ENTRY(IRRCCLK, false),
+ CLK_MGT_ENTRY(HDMICLK, false),
+ CLK_MGT_ENTRY(APEATCLK, false),
+ CLK_MGT_ENTRY(APETRACECLK, true),
+ CLK_MGT_ENTRY(MCDECLK, true),
+ CLK_MGT_ENTRY(DSIALTCLK, false),
+ CLK_MGT_ENTRY(DMACLK, true),
+ CLK_MGT_ENTRY(B2R2CLK, true),
+ CLK_MGT_ENTRY(TVCLK, false),
+ CLK_MGT_ENTRY(RNGCLK, false),
+ CLK_MGT_ENTRY(SIACLK, false),
+ CLK_MGT_ENTRY(SVACLK, false),
+};
+
+bool db5500_prcmu_is_ac_wake_requested(void)
+{
+ return false;
+}
+
+/**
+ * prcmu_config_clkout - Configure one of the programmable clock outputs.
+ * @clkout: The CLKOUT number (0 or 1).
+ * @source: Clock source.
+ * @div: The divider to be applied.
+ *
+ * Configures one of the programmable clock outputs (CLKOUTs).
+ */
+int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
+{
+ static bool configured[2] = {false, false};
+ int r = 0;
+ unsigned long flags;
+ u32 sel_val;
+ u32 div_val;
+ u32 sel_bits;
+ u32 div_bits;
+ u32 sel_mask;
+ u32 div_mask;
+ u8 sel0 = CLKOUT_SEL0_SEL_CLK;
+ u16 sel = 0;
+
+ BUG_ON(clkout > DB5500_CLKOUT1);
+ BUG_ON(source > DB5500_CLKOUT_IRDACLK);
+ BUG_ON(div > 7);
+
+ switch (source) {
+ case DB5500_CLKOUT_REF_CLK_SEL0:
+ sel0 = CLKOUT_SEL0_REF_CLK;
+ break;
+ case DB5500_CLKOUT_RTC_CLK0_SEL0:
+ sel0 = CLKOUT_SEL0_RTC_CLK0;
+ break;
+ case DB5500_CLKOUT_ULP_CLK_SEL0:
+ sel0 = CLKOUT_SEL0_ULP_CLK;
+ break;
+ case DB5500_CLKOUT_STATIC0:
+ sel = CLKOUT_SEL_STATIC0;
+ break;
+ case DB5500_CLKOUT_REFCLK:
+ sel = CLKOUT_SEL_REFCLK;
+ break;
+ case DB5500_CLKOUT_ULPCLK:
+ sel = CLKOUT_SEL_ULPCLK;
+ break;
+ case DB5500_CLKOUT_ARMCLK:
+ sel = CLKOUT_SEL_ARMCLK;
+ break;
+ case DB5500_CLKOUT_SYSACC0CLK:
+ sel = CLKOUT_SEL_SYSACC0CLK;
+ break;
+ case DB5500_CLKOUT_SOC0PLLCLK:
+ sel = CLKOUT_SEL_SOC0PLLCLK;
+ break;
+ case DB5500_CLKOUT_SOC1PLLCLK:
+ sel = CLKOUT_SEL_SOC1PLLCLK;
+ break;
+ case DB5500_CLKOUT_DDRPLLCLK:
+ sel = CLKOUT_SEL_DDRPLLCLK;
+ break;
+ case DB5500_CLKOUT_TVCLK:
+ sel = CLKOUT_SEL_TVCLK;
+ break;
+ case DB5500_CLKOUT_IRDACLK:
+ sel = CLKOUT_SEL_IRDACLK;
+ break;
+ }
+
+ switch (clkout) {
+ case DB5500_CLKOUT0:
+ sel_mask = PRCM_CLKOCR_CLKOUT0_SEL0_MASK |
+ PRCM_CLKOCR_CLKOUT0_SEL_MASK;
+ sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT) |
+ (sel << PRCM_CLKOCR_CLKOUT0_SEL_SHIFT));
+ div_mask = PRCM_CLKODIV_CLKOUT0_DIV_MASK;
+ div_bits = div << PRCM_CLKODIV_CLKOUT0_DIV_SHIFT;
+ break;
+ case DB5500_CLKOUT1:
+ sel_mask = PRCM_CLKOCR_CLKOUT1_SEL0_MASK |
+ PRCM_CLKOCR_CLKOUT1_SEL_MASK;
+ sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT) |
+ (sel << PRCM_CLKOCR_CLKOUT1_SEL_SHIFT));
+ div_mask = PRCM_CLKODIV_CLKOUT1_DIV_MASK;
+ div_bits = div << PRCM_CLKODIV_CLKOUT1_DIV_SHIFT;
+ break;
+ }
+
+ spin_lock_irqsave(&clkout_lock, flags);
+
+ if (configured[clkout]) {
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+
+ sel_val = readl(_PRCMU_BASE + PRCM_CLKOCR);
+ writel((sel_bits | (sel_val & ~sel_mask)),
+ (_PRCMU_BASE + PRCM_CLKOCR));
+
+ div_val = readl(_PRCMU_BASE + PRCM_CLKODIV);
+ writel((div_bits | (div_val & ~div_mask)),
+ (_PRCMU_BASE + PRCM_CLKODIV));
+
+ configured[clkout] = true;
+
+unlock_and_return:
+ spin_unlock_irqrestore(&clkout_lock, flags);
+
+ return r;
+}
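+/*
+ * Usage sketch (illustrative only, not part of this patch): board code
+ * could route the reference clock to CLKOUT0 with a divide-by-8 setting:
+ *
+ *	prcmu_config_clkout(DB5500_CLKOUT0, DB5500_CLKOUT_REFCLK, CLKOUT_DIV_8);
+ *
+ * A second call for the same CLKOUT returns -EINVAL, since each output is
+ * configured at most once.
+ */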
+
+static int request_sysclk(bool enable)
+{
+ int r;
+
+ r = 0;
+ mutex_lock(&mb3_transfer.sysclk_lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
+ cpu_relax();
+
+ if (enable)
+ mb3_transfer.req_st = SYSCLK_ON;
+ else
+ mb3_transfer.req_st = SYSCLK_OFF;
+
+ writeb(mb3_transfer.req_st, (PRCM_REQ_MB3_REFCLK_MGT));
+
+ writeb(MB3H_REFCLK_REQUEST, (PRCM_REQ_MB3_HEADER));
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ /*
+ * The firmware only sends an ACK if we want to enable the
+ * SysClk, and it succeeds.
+ */
+ if (!wait_for_completion_timeout(&mb3_transfer.sysclk_work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
+ __func__);
+ r = -EIO;
+ WARN(1, "Failed to set sysclk");
+ goto unlock_and_return;
+ }
+
+ if ((mb3_transfer.ack.header != MB3H_REFCLK_REQUEST) ||
+ (mb3_transfer.ack.status != mb3_transfer.req_st)) {
+ r = -EIO;
+ }
+
+unlock_and_return:
+ mutex_unlock(&mb3_transfer.sysclk_lock);
+
+ return r;
+}
+
+static int request_timclk(bool enable)
+{
+ u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
+
+ if (!enable)
+ val |= PRCM_TCR_STOP_TIMERS;
+ writel(val, _PRCMU_BASE + PRCM_TCR);
+
+ return 0;
+}
+
+static int request_clk(u8 clock, bool enable)
+{
+ int r = 0;
+
+ BUG_ON(clock >= DB5500_NUM_CLK_CLIENTS);
+
+ mutex_lock(&mb2_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ /* fill in mailbox */
+ writeb(clock, PRCM_REQ_MB2_CLK_CLIENT);
+ writeb(enable, PRCM_REQ_MB2_CLK_STATE);
+
+ writeb(MB2H_CLK_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(500))) {
+ pr_err("prcmu: request_clk() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed in request_clk");
+ goto unlock_and_return;
+ }
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_CLK_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+ return r;
+}
+
+static int request_reg_clock(u8 clock, bool enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ WARN_ON(!clk_mgt[clock].offset);
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
+ if (enable) {
+ val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
+ } else {
+ clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
+ val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
+ }
+ writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
+
+ /* Release the HW semaphore. */
+ writel(0, _PRCMU_BASE + PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+
+ return 0;
+}
+
+/*
+ * request_pll() - Request for a pll to be enabled or disabled.
+ * @pll: The pll for which the request is made.
+ * @enable: Whether the clock should be enabled (true) or disabled (false).
+ *
+ * This function should only be used by the clock implementation.
+ * Do not use it from any other place!
+ */
+static int request_pll(u8 pll, bool enable)
+{
+ int r = 0;
+
+ BUG_ON(pll >= DB5500_NUM_PLL_ID);
+ mutex_lock(&mb2_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ mb2_transfer.req.pll_st[pll] = enable;
+
+ /* fill in mailbox */
+ writeb(pll, PRCM_REQ_MB2_PLL_CLIENT);
+ writeb(mb2_transfer.req.pll_st[pll], PRCM_REQ_MB2_PLL_STATE);
+
+ writeb(MB2H_PLL_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(500))) {
+ pr_err("prcmu: set_pll() failed.\n"
+ "prcmu: Please check your firmware version.\n");
+ r = -EIO;
+ WARN(1, "Failed to set pll");
+ goto unlock_and_return;
+ }
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_PLL_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+
+ return r;
+}
+
+/**
+ * db5500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
+ * @clock: The clock for which the request is made.
+ * @enable: Whether the clock should be enabled (true) or disabled (false).
+ *
+ * This function should only be used by the clock implementation.
+ * Do not use it from any other place!
+ */
+int db5500_prcmu_request_clock(u8 clock, bool enable)
+{
+ /* MSP1 & CD clocks are handled by FW */
+ if (clock == PRCMU_MSP1CLK)
+ return request_clk(DB5500_MSP1CLK, enable);
+ else if (clock == PRCMU_CDCLK)
+ return request_clk(DB5500_CDCLK, enable);
+ else if (clock < PRCMU_NUM_REG_CLOCKS)
+ return request_reg_clock(clock, enable);
+ else if (clock == PRCMU_TIMCLK)
+ return request_timclk(enable);
+ else if (clock == PRCMU_PLLSOC0)
+ return request_pll(DB5500_PLL_SOC0, enable);
+ else if (clock == PRCMU_PLLSOC1)
+ return request_pll(DB5500_PLL_SOC1, enable);
+ else if (clock == PRCMU_PLLDDR)
+ return request_pll(DB5500_PLL_DDR, enable);
+ else if (clock == PRCMU_SYSCLK)
+ return request_sysclk(enable);
+ else
+ return -EINVAL;
+}
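+/*
+ * Illustrative call (only from the ux500 clock implementation, as noted
+ * above): enabling the system clock is routed to a mailbox 3 request:
+ *
+ *	err = db5500_prcmu_request_clock(PRCMU_SYSCLK, true);
+ */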
+
+/* This function should only be called while mb0_transfer.lock is held. */
+static void config_wakeups(void)
+{
+ static u32 last_dbb_events;
+ static u32 last_abb_events;
+ u32 dbb_events;
+ u32 abb_events;
+
+ dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
+
+ abb_events = mb0_transfer.req.abb_events;
+
+ if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
+ return;
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ cpu_relax();
+
+ writel(dbb_events, PRCM_REQ_MB0_WAKEUP_DBB);
+ writel(abb_events, PRCM_REQ_MB0_WAKEUP_ABB);
+ writeb(MB0H_WAKE_UP_CFG, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ last_dbb_events = dbb_events;
+ last_abb_events = abb_events;
+}
+
+int db5500_prcmu_config_esram0_deep_sleep(u8 state)
+{
+ unsigned long flags;
+
+ if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
+ (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ if (state == ESRAM0_DEEP_SLEEP_STATE_RET)
+ writeb(RET_ST, PRCM_REQ_MB0_ESRAM0_STATE);
+ else
+ writeb(OFF_ST, PRCM_REQ_MB0_ESRAM0_STATE);
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+
+ return 0;
+}
+
+int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
+{
+ int r = 0;
+ unsigned long flags;
+
+ /* Deep Idle is not supported in DB5500 */
+ BUG_ON((state < PRCMU_AP_SLEEP) || (state >= PRCMU_AP_DEEP_IDLE));
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ cpu_relax();
+
+ switch (state) {
+ case PRCMU_AP_IDLE:
+ writeb(DB5500_AP_IDLE, PRCM_REQ_MB0_AP_POWER_STATE);
+ /* TODO: Can be high latency */
+ writeb(DDR_PWR_STATE_UNCHANGED, PRCM_REQ_MB0_DDR_STATE);
+ break;
+ case PRCMU_AP_SLEEP:
+ writeb(DB5500_AP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE);
+ break;
+ case PRCMU_AP_DEEP_SLEEP:
+ writeb(DB5500_AP_DEEP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE);
+ break;
+ default:
+ r = -EINVAL;
+ goto unlock_return;
+ }
+ writeb((keep_ap_pll ? 1 : 0), PRCM_REQ_MB0_AP_PLL_STATE);
+ writeb((keep_ulp_clk ? 1 : 0), PRCM_REQ_MB0_ULP_CLOCK_STATE);
+
+ writeb(MB0H_PWR_STATE_TRANS, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+unlock_return:
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+
+ return r;
+}
+
+void db5500_prcmu_enable_wakeups(u32 wakeups)
+{
+ unsigned long flags;
+ u32 bits;
+ int i;
+
+ BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
+
+ for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
+ if (wakeups & BIT(i)) {
+ if (prcmu_wakeup_bit[i] == 0)
+ WARN(1, "WAKEUP NOT SUPPORTED");
+ else
+ bits |= prcmu_wakeup_bit[i];
+ }
+ }
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ mb0_transfer.req.dbb_wakeups = bits;
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+void db5500_prcmu_config_abb_event_readout(u32 abb_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ mb0_transfer.req.abb_events = abb_events;
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+void db5500_prcmu_get_abb_event_buffer(void __iomem **buf)
+{
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ *buf = (PRCM_ACK_MB0_WAKEUP_1_ABB);
+ else
+ *buf = (PRCM_ACK_MB0_WAKEUP_0_ABB);
+}
+
+/* This function must be called with mb4_transfer.lock held. */
+static int mailbox4_request(u8 mb4_request, u8 ack_request)
+{
+ int ret = 0;
+
+ writeb(mb4_request, PRCM_REQ_MB4_HEADER);
+ writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+
+ if (!wait_for_completion_timeout(&mb4_transfer.work,
+ msecs_to_jiffies(500))) {
+ pr_err("prcmu: MB4 request %d failed", mb4_request);
+ ret = -EIO;
+ WARN(1, "prcmu: failed mb4 request");
+ goto failed;
+ }
+
+ if (mb4_transfer.ack.header != ack_request ||
+ mb4_transfer.ack.status != RC_SUCCESS)
+ ret = -EIO;
+failed:
+ return ret;
+}
+
+int db5500_prcmu_get_hotdog(void)
+{
+ return readw(PRCM_SHARE_INFO_HOTDOG);
+}
+
+int db5500_prcmu_config_hotdog(u8 threshold)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(threshold, PRCM_REQ_MB4_HOTDOG_THRESHOLD);
+ r = mailbox4_request(MB4H_CFG_HOTDOG, MB4H_ACK_CFG_HOTDOG);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+int db5500_prcmu_config_hotmon(u8 low, u8 high)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(low, PRCM_REQ_MB4_HOTMON_LOW);
+ writew(high, PRCM_REQ_MB4_HOTMON_HIGH);
+
+ r = mailbox4_request(MB4H_CFG_HOTMON, MB4H_ACK_CFG_HOTMON);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+static int config_hot_period(u16 val)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(val, PRCM_REQ_MB4_HOT_PERIOD);
+ r = mailbox4_request(MB4H_CFG_HOTPERIOD, MB4H_ACK_CFG_HOTPERIOD);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+/*
+ * The period is given in milliseconds (0xFFFF is reserved to stop sensing).
+ */
+int db5500_prcmu_start_temp_sense(u16 period)
+{
+ if (period == 0xFFFF)
+ return -EINVAL;
+
+ return config_hot_period(period);
+}
+
+int db5500_prcmu_stop_temp_sense(void)
+{
+ return config_hot_period(0xFFFF);
+}
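+/*
+ * Illustrative use of the temperature sensing API (not part of this patch):
+ * db5500_prcmu_start_temp_sense(500) starts sensing with a 500 ms period,
+ * and db5500_prcmu_stop_temp_sense() stops it again. The value 0xFFFF is
+ * reserved as the "stop" period and is rejected by
+ * db5500_prcmu_start_temp_sense().
+ */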
+
/**
* db5500_prcmu_abb_read() - Read register value(s) from the ABB.
* @slave: The I2C slave address.
@@ -170,14 +1090,14 @@ int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
writeb(reg, PRCM_REQ_MB5_I2C_REG);
writeb(size, PRCM_REQ_MB5_I2C_SIZE);
writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER);
- writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
wait_for_completion(&mb5_transfer.work);
r = 0;
@@ -211,7 +1131,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
writeb(reg, PRCM_REQ_MB5_I2C_REG);
@@ -219,7 +1139,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size);
writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER);
- writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
wait_for_completion(&mb5_transfer.work);
if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) &&
@@ -233,42 +1153,337 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
return r;
}
+/**
+ * db5500_prcmu_set_arm_opp - set the appropriate ARM OPP
+ * @opp: The new ARM operating point to which transition is to be made
+ * Returns: 0 on success, non-zero on failure
+ *
+ * This function sets the operating point of the ARM.
+ */
+int db5500_prcmu_set_arm_opp(u8 opp)
+{
+ int r;
+ u8 db5500_opp;
+
+ r = 0;
+
+ switch (opp) {
+ case ARM_EXTCLK:
+ db5500_opp = DB5500_ARM_EXT_OPP;
+ break;
+ case ARM_50_OPP:
+ db5500_opp = DB5500_ARM_50_OPP;
+ break;
+ case ARM_100_OPP:
+ db5500_opp = DB5500_ARM_100_OPP;
+ break;
+ default:
+ pr_err("prcmu: %s() received wrong opp value: %d\n",
+ __func__, opp);
+ r = -EINVAL;
+ goto bailout;
+ }
+
+ mutex_lock(&mb1_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_ARM_OPP, PRCM_REQ_MB1_HEADER);
+
+ writeb(db5500_opp, PRCM_REQ_MB1_ARM_OPP);
+ writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ if (!wait_for_completion_timeout(&mb1_transfer.work,
+ msecs_to_jiffies(500))) {
+ r = -EIO;
+ WARN(1, "prcmu: failed to set arm opp");
+ goto unlock_and_return;
+ }
+
+ if (mb1_transfer.ack.header != MB1H_ARM_OPP ||
+ (mb1_transfer.ack.arm_opp != db5500_opp) ||
+ (mb1_transfer.ack.arm_voltage_st != RC_SUCCESS))
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb1_transfer.lock);
+bailout:
+ if (!r)
+ prcmu_debug_arm_opp_log(opp);
+ return r;
+}
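+/*
+ * Illustrative call (e.g. from cpufreq code, hypothetical): request the
+ * 50% ARM operating point and warn on failure:
+ *
+ *	if (db5500_prcmu_set_arm_opp(ARM_50_OPP))
+ *		pr_warning("prcmu: could not set ARM_50_OPP\n");
+ */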
+
+static void __init prcmu_ape_clocks_init(void)
+{
+ u8 opp = db5500_prcmu_get_ape_opp();
+ unsigned long flags;
+ int i;
+
+ WARN(opp != APE_100_OPP, "%s: Initial APE OPP (%u) not 100%%?\n",
+ __func__, opp);
+
+ for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) {
+ struct clk_mgt *clkmgt = &clk_mgt[i];
+ u32 clkval;
+ u32 div;
+
+ if (!clkmgt->scalable && !clkmgt->force50)
+ continue;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ clkval = readl(_PRCMU_BASE + clkmgt->offset);
+ div = clkval & PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ div >>= PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+
+ if (clkmgt->force50) {
+ div *= 2;
+
+ clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ clkval |= div << PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+ writel(clkval, _PRCMU_BASE + clkmgt->offset);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+ continue;
+ }
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+
+ clkmgt->div = div;
+ if (!div)
+ pr_err("%s: scalable clock at offset %#x has zero divisor\n",
+ __func__, clkmgt->offset);
+ }
+}
+
+static void prcmu_ape_clocks_scale(u8 opp)
+{
+ unsigned long irqflags;
+ unsigned int i;
+ u32 clkval;
+
+ /*
+ * Note: calling printk() under the following lock can cause lock
+ * recursion via clk_enable() for the console UART!
+ */
+ spin_lock_irqsave(&clk_mgt_lock, irqflags);
+
+ /* Grab the HW semaphore (HWSEM). */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) {
+ u32 divval;
+
+ if (!clk_mgt[i].scalable)
+ continue;
+
+ clkval = readl(_PRCMU_BASE + clk_mgt[i].offset);
+ divval = clk_mgt[i].div;
+
+ pr_debug("PRCMU: reg %#x prev clk = 0x%x stored div = 0x%x\n",
+ clk_mgt[i].offset, clkval, divval);
+
+ if (opp == DB5500_APE_50_OPP)
+ divval *= 2;
+
+ clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ clkval |= divval << PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+
+ pr_debug("PRCMU: wr 0x%x in reg 0x%x\n",
+ clkval, clk_mgt[i].offset);
+
+ writel(clkval, _PRCMU_BASE + clk_mgt[i].offset);
+ }
+
+ /* Release the HW semaphore. */
+ writel(0, (_PRCMU_BASE + PRCM_SEM));
+
+ spin_unlock_irqrestore(&clk_mgt_lock, irqflags);
+}
+
+int db5500_prcmu_set_ape_opp(u8 opp)
+{
+ int ret = 0;
+ u8 db5500_opp;
+
+ if (opp == db5500_prcmu_get_ape_opp())
+ return ret;
+
+ if (cpu_is_u5500v1())
+ return -EINVAL;
+
+ switch (opp) {
+ case APE_100_OPP:
+ db5500_opp = DB5500_APE_100_OPP;
+ break;
+ case APE_50_OPP:
+ db5500_opp = DB5500_APE_50_OPP;
+ break;
+ default:
+ pr_err("prcmu: %s() received wrong opp value: %d\n",
+ __func__, opp);
+ ret = -EINVAL;
+ goto bailout;
+ }
+
+ mutex_lock(&mb1_transfer.lock);
+
+ prcmu_ape_clocks_scale(db5500_opp);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_APE_OPP, PRCM_REQ_MB1_HEADER);
+ writeb(db5500_opp, PRCM_REQ_MB1_APE_OPP);
+ writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+
+ if (!wait_for_completion_timeout(&mb1_transfer.work,
+ msecs_to_jiffies(500))) {
+ ret = -EIO;
+ WARN(1, "prcmu: failed to set ape opp to %u", opp);
+ goto unlock_and_return;
+ }
+
+ if (mb1_transfer.ack.header != MB1H_APE_OPP ||
+ (mb1_transfer.ack.ape_opp != db5500_opp) ||
+ (mb1_transfer.ack.ape_voltage_st != RC_SUCCESS))
+ ret = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb1_transfer.lock);
+bailout:
+ return ret;
+}
+
+int db5500_prcmu_get_ape_opp(void)
+{
+ u8 opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+
+ switch (opp) {
+ case DB5500_APE_100_OPP:
+ return APE_100_OPP;
+ case DB5500_APE_50_OPP:
+ return APE_50_OPP;
+ default:
+ pr_err("prcmu: %s() read unknown opp value: %d\n",
+ __func__, opp);
+ return APE_100_OPP;
+ }
+}
+
+int db5500_prcmu_get_ddr_opp(void)
+{
+ return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+}
+
+int db5500_prcmu_set_ddr_opp(u8 opp)
+{
+ if (cpu_is_u5500v1())
+ return -EINVAL;
+
+ if (opp != DDR_100_OPP && opp != DDR_50_OPP)
+ return -EINVAL;
+
+ writeb(opp, _PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+
+ return 0;
+}
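+/*
+ * Illustrative call (not part of this patch): request the 50% DDR OPP,
+ * which is rejected on u5500 v1 by the cpu_is_u5500v1() check above:
+ *
+ *	(void)db5500_prcmu_set_ddr_opp(DDR_50_OPP);
+ */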
+
+/**
+ * db5500_prcmu_get_arm_opp - get the current ARM OPP
+ *
+ * Returns: the current ARM OPP
+ */
+int db5500_prcmu_get_arm_opp(void)
+{
+ u8 opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP);
+
+ switch (opp) {
+ case DB5500_ARM_EXT_OPP:
+ return ARM_EXTCLK;
+ case DB5500_ARM_50_OPP:
+ return ARM_50_OPP;
+ case DB5500_ARM_100_OPP:
+ return ARM_100_OPP;
+ default:
+ pr_err("prcmu: %s() read unknown opp value: %d\n",
+ __func__, opp);
+ return ARM_100_OPP;
+ }
+}
+
+int prcmu_resetout(u8 resoutn, u8 state)
+{
+ int offset;
+ int pin = -1;
+
+ offset = state > 0 ? PRCM_RESOUTN_SET_OFFSET : PRCM_RESOUTN_CLR_OFFSET;
+
+ switch (resoutn) {
+ case 0:
+ pin = PRCMU_RESOUTN0_PIN;
+ break;
+ case 1:
+ pin = PRCMU_RESOUTN1_PIN;
+ break;
+ case 2:
+ pin = PRCMU_RESOUTN2_PIN;
+ default:
+ break;
+ }
+
+ if (pin > 0)
+ writel(pin, _PRCMU_BASE + offset);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
int db5500_prcmu_enable_dsipll(void)
{
int i;
+ int ret = 0;
/* Enable DSIPLL_RESETN resets */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
+ writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_CLR);
/* Unclamp DSIPLL in/out */
- writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
+ writel(PRCMU_UNCLAMP_DSIPLL, _PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR);
/* Set DSI PLL FREQ */
- writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
+ writel(PRCMU_PLLDSI_FREQ_SETTING, _PRCMU_BASE + PRCM_PLLDSI_FREQ);
writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
- PRCM_DSI_PLLOUT_SEL);
+ _PRCMU_BASE + PRCM_DSI_PLLOUT_SEL);
/* Enable Escape clocks */
- writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
+ writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV);
/* Start DSI PLL */
- writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
+ writel(PRCMU_ENABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE);
/* Reset DSI PLL */
- writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
+ writel(PRCMU_DSI_RESET_SW, _PRCMU_BASE + PRCM_DSI_SW_RESET);
for (i = 0; i < 10; i++) {
- if ((readl(PRCM_PLLDSI_LOCKP) &
+ if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED)
break;
udelay(100);
}
+
+ if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
+ PRCMU_PLLDSI_LOCKP_LOCKED)
+ != PRCMU_PLLDSI_LOCKP_LOCKED)
+ ret = -EIO;
/* Release DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
- return 0;
+ writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_SET);
+ return ret;
}
int db5500_prcmu_disable_dsipll(void)
{
/* Disable dsi pll */
- writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
+ writel(PRCMU_DISABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE);
/* Disable escapeclock */
- writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
+ writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV);
return 0;
}
@@ -276,27 +1491,132 @@ int db5500_prcmu_set_display_clocks(void)
{
/* HDMI and TVCLK Should be handled somewhere else */
/* PLLDIV=8, PLLSW=2, CLKEN=1 */
- writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
+ writel(PRCMU_DSI_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_HDMICLK_MGT);
/* PLLDIV=14, PLLSW=2, CLKEN=1 */
- writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
+ writel(PRCMU_DSI_LP_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_TVCLK_MGT);
return 0;
}
+/**
+ * db5500_prcmu_system_reset - System reset
+ *
+ * Saves the reset reason code and then sets the APE_SOFTRST register which
+ * fires an interrupt to fw
+ */
+void db5500_prcmu_system_reset(u16 reset_code)
+{
+ writew(reset_code, PRCM_SW_RST_REASON);
+ writel(1, _PRCMU_BASE + PRCM_APE_SOFTRST);
+}
+
+/**
+ * db5500_prcmu_get_reset_code - Retrieve SW reset reason code
+ *
+ * Retrieves the reset reason code stored by prcmu_system_reset() before
+ * last restart.
+ */
+u16 db5500_prcmu_get_reset_code(void)
+{
+ return readw(PRCM_SW_RST_REASON);
+}
+
static void ack_dbb_wakeup(void)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.lock, flags);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
- writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
- writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
+ writeb(MB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
+int db5500_prcmu_set_epod(u16 epod, u8 epod_state)
+{
+ int r = 0;
+ bool ram_retention = false;
+
+ /* check argument */
+ BUG_ON(epod < DB5500_EPOD_ID_BASE);
+ BUG_ON(epod_state > EPOD_STATE_ON);
+ BUG_ON((epod - DB5500_EPOD_ID_BASE) >= DB5500_NUM_EPOD_ID);
+
+ if (epod == DB5500_EPOD_ID_ESRAM12)
+ ram_retention = true;
+
+ /* check argument */
+ BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
+
+ /* get lock */
+ mutex_lock(&mb2_transfer.lock);
+
+ /* wait for mailbox */
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ /* Retention is allowed only for ESRAM12 */
+ if (epod == DB5500_EPOD_ID_ESRAM12) {
+ switch (epod_state) {
+ case EPOD_STATE_ON:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_ON;
+ break;
+ case EPOD_STATE_OFF:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_OFF;
+ break;
+ case EPOD_STATE_RAMRET:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_RET;
+ break;
+ default:
+ r = -EINVAL;
+ goto unlock_and_return;
+ break;
+ }
+ } else {
+ if (epod_state == EPOD_STATE_ON)
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_ON;
+ else if (epod_state == EPOD_STATE_OFF)
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OFF;
+ else {
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+ }
+ /* fill in mailbox */
+ writeb((epod - DB5500_EPOD_ID_BASE), PRCM_REQ_MB2_EPOD_CLIENT);
+ writeb(mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE],
+ PRCM_REQ_MB2_EPOD_STATE);
+
+ writeb(MB2H_EPOD_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(500))) {
+ pr_err("prcmu: set_epod() failed.\n"
+ "prcmu: Please check your firmware version.\n");
+ r = -EIO;
+ WARN(1, "Failed to set epod");
+ goto unlock_and_return;
+ }
+
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_EPOD_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+ return r;
+}
+
static inline void print_unknown_header_warning(u8 n, u8 header)
{
pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
@@ -306,11 +1626,25 @@ static inline void print_unknown_header_warning(u8 n, u8 header)
static bool read_mailbox_0(void)
{
bool r;
+ u32 ev;
+ unsigned int n;
+
u8 header;
header = readb(PRCM_ACK_MB0_HEADER);
switch (header) {
- case AMB0H_WAKE_UP:
+ case MB0H_WAKE_UP:
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB);
+ else
+ ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB);
+
+ ev &= mb0_transfer.req.dbb_irqs;
+
+ for (n = 0; n < NUM_DB5500_PRCMU_WAKEUPS; n++) {
+ if (ev & prcmu_irq_bit[n])
+ generic_handle_irq(IRQ_DB5500_PRCMU_BASE + n);
+ }
r = true;
break;
default:
@@ -318,31 +1652,119 @@ static bool read_mailbox_0(void)
r = false;
break;
}
- writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return r;
}
static bool read_mailbox_1(void)
{
- writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
+ u8 header;
+ bool do_complete = true;
+
+ header = mb1_transfer.ack.header = readb(PRCM_ACK_MB1_HEADER);
+
+ switch (header) {
+ case MB1H_ARM_OPP:
+ mb1_transfer.ack.arm_opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP);
+ mb1_transfer.ack.arm_voltage_st =
+ readb(PRCM_ACK_MB1_ARM_VOLT_STATUS);
+ break;
+ case MB1H_APE_OPP:
+ mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+ mb1_transfer.ack.ape_voltage_st =
+ readb(PRCM_ACK_MB1_APE_VOLT_STATUS);
+ break;
+ case MB1H_ARM_APE_OPP:
+ mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+ mb1_transfer.ack.ape_voltage_st =
+ readb(PRCM_ACK_MB1_APE_VOLT_STATUS);
+ break;
+ default:
+ print_unknown_header_warning(1, header);
+ do_complete = false;
+ break;
+ }
+
+ writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+
+ if (do_complete)
+ complete(&mb1_transfer.work);
+
return false;
}
static bool read_mailbox_2(void)
{
- writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
+ u8 header;
+
+ header = readb(PRCM_ACK_MB2_HEADER);
+ mb2_transfer.ack.header = header;
+ switch (header) {
+ case MB2H_EPOD_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_EPOD_STATUS);
+ break;
+ case MB2H_CLK_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_CLK_STATUS);
+ break;
+ case MB2H_PLL_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_PLL_STATUS);
+ break;
+ default:
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ pr_err("prcmu: Wrong ACK received for MB2 request \n");
+ return false;
+ break;
+ }
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ complete(&mb2_transfer.work);
return false;
}
static bool read_mailbox_3(void)
{
- writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
+ u8 header;
+
+ header = readb(PRCM_ACK_MB3_HEADER);
+ mb3_transfer.ack.header = header;
+ switch (header) {
+ case MB3H_REFCLK_REQUEST:
+ mb3_transfer.ack.status = readb(PRCM_ACK_MB3_REFCLK_REQ);
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ complete(&mb3_transfer.sysclk_work);
+ break;
+ default:
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ pr_err("prcmu: wrong MB3 header\n");
+ break;
+ }
+
return false;
}
static bool read_mailbox_4(void)
{
- writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
+ u8 header;
+ bool do_complete = true;
+
+ header = readb(PRCM_ACK_MB4_HEADER);
+ mb4_transfer.ack.header = header;
+ switch (header) {
+ case MB4H_ACK_CFG_HOTDOG:
+ case MB4H_ACK_CFG_HOTMON:
+ case MB4H_ACK_CFG_HOTPERIOD:
+ mb4_transfer.ack.status = readb(PRCM_ACK_MB4_REQUESTS);
+ break;
+ default:
+ print_unknown_header_warning(4, header);
+ do_complete = false;
+ break;
+ }
+
+ writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLEAR));
+
+ if (do_complete)
+ complete(&mb4_transfer.work);
+
return false;
}
@@ -363,19 +1785,19 @@ static bool read_mailbox_5(void)
print_unknown_header_warning(5, header);
break;
}
- writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
static bool read_mailbox_6(void)
{
- writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(6), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
static bool read_mailbox_7(void)
{
- writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(7), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
@@ -396,7 +1818,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
u8 n;
irqreturn_t r;
- bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
+ bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
if (unlikely(!bits))
return IRQ_NONE;
@@ -417,35 +1839,232 @@ static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
return IRQ_HANDLED;
}
+static void prcmu_mask_work(struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+static void prcmu_irq_mask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
+
+ mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE];
+
+ spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
+ schedule_work(&mb0_transfer.mask_work);
+}
+
+static void prcmu_irq_unmask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
+
+ mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE];
+
+ spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
+ schedule_work(&mb0_transfer.mask_work);
+}
+
+static void noop(struct irq_data *d)
+{
+}
+
+static struct irq_chip prcmu_irq_chip = {
+ .name = "prcmu",
+ .irq_disable = prcmu_irq_mask,
+ .irq_ack = noop,
+ .irq_mask = prcmu_irq_mask,
+ .irq_unmask = prcmu_irq_unmask,
+};
+
void __init db5500_prcmu_early_init(void)
{
+ unsigned int i;
+ void __iomem *tcpm_base = ioremap_nocache(U5500_PRCMU_TCPM_BASE, SZ_4K);
+
+ if (tcpm_base != NULL) {
+ int version_high, version_low;
+
+ version_high = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
+ version_low = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET + 4);
+ prcmu_version.board = (version_high >> 24) & 0xFF;
+ prcmu_version.fw_version = version_high & 0xFF;
+ prcmu_version.api_version = version_low & 0xFF;
+
+ pr_info("PRCMU Firmware Version: 0x%x\n",
+ prcmu_version.fw_version);
+ pr_info("PRCMU API Version: 0x%x\n",
+ prcmu_version.api_version);
+
+ iounmap(tcpm_base);
+ }
+
tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE);
spin_lock_init(&mb0_transfer.lock);
+ spin_lock_init(&mb0_transfer.dbb_irqs_lock);
+ mutex_init(&mb1_transfer.lock);
+ init_completion(&mb1_transfer.work);
+ mutex_init(&mb2_transfer.lock);
+ init_completion(&mb2_transfer.work);
+ mutex_init(&mb3_transfer.sysclk_lock);
+ init_completion(&mb3_transfer.sysclk_work);
+ mutex_init(&mb4_transfer.lock);
+ init_completion(&mb4_transfer.work);
mutex_init(&mb5_transfer.lock);
init_completion(&mb5_transfer.work);
+
+ INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
+
+ /* Initialize irqs. */
+ for (i = 0; i < NUM_DB5500_PRCMU_WAKEUPS; i++) {
+ unsigned int irq;
+
+ irq = IRQ_DB5500_PRCMU_BASE + i;
+ irq_set_chip_and_handler(irq, &prcmu_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ prcmu_ape_clocks_init();
}
+/*
+ * Power domain switches (ePODs) modeled as regulators for the DB5500 SoC
+ */
+static struct regulator_consumer_supply db5500_vape_consumers[] = {
+ REGULATOR_SUPPLY("v-ape", NULL),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+ REGULATOR_SUPPLY("vcore", "sdi0"),
+ REGULATOR_SUPPLY("vcore", "sdi1"),
+ REGULATOR_SUPPLY("vcore", "sdi2"),
+ REGULATOR_SUPPLY("vcore", "sdi3"),
+ REGULATOR_SUPPLY("vcore", "sdi4"),
+ REGULATOR_SUPPLY("v-uart", "uart0"),
+ REGULATOR_SUPPLY("v-uart", "uart1"),
+ REGULATOR_SUPPLY("v-uart", "uart2"),
+ REGULATOR_SUPPLY("v-uart", "uart3"),
+ REGULATOR_SUPPLY("v-ape", "db5500-keypad"),
+};
+
+static struct regulator_consumer_supply db5500_sga_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.0"),
+ REGULATOR_SUPPLY("v-mali", NULL),
+};
+
+static struct regulator_consumer_supply db5500_hva_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.1"),
+ REGULATOR_SUPPLY("v-hva", NULL),
+};
+
+static struct regulator_consumer_supply db5500_sia_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.2"),
+ REGULATOR_SUPPLY("v-sia", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply db5500_disp_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.3"),
+ REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
+ REGULATOR_SUPPLY("vsupply", "mcde"),
+};
+
+static struct regulator_consumer_supply db5500_esram12_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.4"),
+ REGULATOR_SUPPLY("v-esram12", "mcde"),
+ REGULATOR_SUPPLY("esram12", "hva"),
+};
+
+#define DB5500_REGULATOR_SWITCH(lower, upper) \
+[DB5500_REGULATOR_SWITCH_##upper] = { \
+ .constraints = { \
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \
+ }, \
+ .consumer_supplies = db5500_##lower##_consumers, \
+ .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\
+}
+
+static struct regulator_init_data db5500_regulators[DB5500_NUM_REGULATORS] = {
+ [DB5500_REGULATOR_VAPE] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = db5500_vape_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db5500_vape_consumers),
+ },
+ DB5500_REGULATOR_SWITCH(sga, SGA),
+ DB5500_REGULATOR_SWITCH(hva, HVA),
+ DB5500_REGULATOR_SWITCH(sia, SIA),
+ DB5500_REGULATOR_SWITCH(disp, DISP),
+ DB5500_REGULATOR_SWITCH(esram12, ESRAM12),
+};
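
For illustration, each DB5500_REGULATOR_SWITCH(lower, upper) entry above is just shorthand for a regulator_init_data initializer; DB5500_REGULATOR_SWITCH(sga, SGA) expands to the following (this is only the preprocessor output of the macro defined above, not additional code):

[DB5500_REGULATOR_SWITCH_SGA] = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.consumer_supplies = db5500_sga_consumers,
	.num_consumer_supplies = ARRAY_SIZE(db5500_sga_consumers),
},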
+
+static struct mfd_cell db5500_prcmu_devs[] = {
+ {
+ .name = "db5500-prcmu-regulators",
+ .platform_data = &db5500_regulators,
+ .pdata_size = sizeof(db5500_regulators),
+ },
+ {
+ .name = "cpufreq-u5500",
+ },
+};
+
/**
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-int __init db5500_prcmu_init(void)
+static int __init db5500_prcmu_probe(struct platform_device *pdev)
{
- int r = 0;
+ int err = 0;
if (ux500_is_svp() || !cpu_is_u5500())
return -ENODEV;
/* Clean up the mailbox interrupts after pre-kernel code. */
- writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
+ writel(ALL_MBOX_BITS, _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
- r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
- prcmu_irq_thread_fn, 0, "prcmu", NULL);
- if (r < 0) {
+ err = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
+ prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+ if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto no_irq_return;
}
- return 0;
+
+ err = mfd_add_devices(&pdev->dev, 0, db5500_prcmu_devs,
+ ARRAY_SIZE(db5500_prcmu_devs), NULL,
+ 0);
+
+ if (err)
+ pr_err("prcmu: Failed to add subdevices\n");
+ else
+ pr_info("DB5500 PRCMU initialized\n");
+
+no_irq_return:
+ return err;
+}
+
+static struct platform_driver db5500_prcmu_driver = {
+ .driver = {
+ .name = "db5500-prcmu",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init db5500_prcmu_init(void)
+{
+ return platform_driver_probe(&db5500_prcmu_driver, db5500_prcmu_probe);
}
arch_initcall(db5500_prcmu_init);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a25ab9c6b5a..b5603ed2211 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -34,6 +34,8 @@
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
#include <mach/id.h>
+#include <mach/prcmu-debug.h>
+
#include "dbx500-prcmu-regs.h"
/* Offset for the firmware version within the TCPM */
@@ -341,11 +343,13 @@ static struct {
* mb1_transfer - state needed for mailbox 1 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
+ * @ape_opp: The current APE OPP.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
+ u8 ape_opp;
struct {
u8 header;
u8 arm_opp;
@@ -420,43 +424,52 @@ static DEFINE_SPINLOCK(gpiocr_lock);
static __iomem void *tcdm_base;
struct clk_mgt {
- unsigned int offset;
+ void __iomem *reg;
u32 pllsw;
+ int branch;
+ bool clk38div;
+};
+
+enum {
+ PLL_RAW,
+ PLL_FIX,
+ PLL_DIV
};
static DEFINE_SPINLOCK(clk_mgt_lock);
-#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT_OFF), 0 }
+#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
+ { (PRCM_##_name##_MGT), 0, _branch, _clk38div }
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
- CLK_MGT_ENTRY(SGACLK),
- CLK_MGT_ENTRY(UARTCLK),
- CLK_MGT_ENTRY(MSP02CLK),
- CLK_MGT_ENTRY(MSP1CLK),
- CLK_MGT_ENTRY(I2CCLK),
- CLK_MGT_ENTRY(SDMMCCLK),
- CLK_MGT_ENTRY(SLIMCLK),
- CLK_MGT_ENTRY(PER1CLK),
- CLK_MGT_ENTRY(PER2CLK),
- CLK_MGT_ENTRY(PER3CLK),
- CLK_MGT_ENTRY(PER5CLK),
- CLK_MGT_ENTRY(PER6CLK),
- CLK_MGT_ENTRY(PER7CLK),
- CLK_MGT_ENTRY(LCDCLK),
- CLK_MGT_ENTRY(BMLCLK),
- CLK_MGT_ENTRY(HSITXCLK),
- CLK_MGT_ENTRY(HSIRXCLK),
- CLK_MGT_ENTRY(HDMICLK),
- CLK_MGT_ENTRY(APEATCLK),
- CLK_MGT_ENTRY(APETRACECLK),
- CLK_MGT_ENTRY(MCDECLK),
- CLK_MGT_ENTRY(IPI2CCLK),
- CLK_MGT_ENTRY(DSIALTCLK),
- CLK_MGT_ENTRY(DMACLK),
- CLK_MGT_ENTRY(B2R2CLK),
- CLK_MGT_ENTRY(TVCLK),
- CLK_MGT_ENTRY(SSPCLK),
- CLK_MGT_ENTRY(RNGCLK),
- CLK_MGT_ENTRY(UICCCLK),
+ CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
+ CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
+ CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
+ CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(TVCLK, PLL_FIX, false),
+ CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
};
static struct regulator *hwacc_regulator[NUM_HW_ACC];
@@ -895,6 +908,8 @@ int db8500_prcmu_set_arm_opp(u8 opp)
mutex_unlock(&mb1_transfer.lock);
+ prcmu_debug_arm_opp_log(opp);
+
return r;
}
@@ -909,23 +924,23 @@ int db8500_prcmu_get_arm_opp(void)
}
/**
- * prcmu_get_ddr_opp - get the current DDR OPP
+ * db8500_prcmu_get_ddr_opp - get the current DDR OPP
*
* Returns: the current DDR OPP
*/
-int prcmu_get_ddr_opp(void)
+int db8500_prcmu_get_ddr_opp(void)
{
return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}
/**
- * set_ddr_opp - set the appropriate DDR OPP
+ * db8500_set_ddr_opp - set the appropriate DDR OPP
* @opp: The new DDR operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the DDR.
*/
-int prcmu_set_ddr_opp(u8 opp)
+int db8500_prcmu_set_ddr_opp(u8 opp)
{
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
@@ -935,25 +950,82 @@ int prcmu_set_ddr_opp(u8 opp)
return 0;
}
+
+/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
+static void request_even_slower_clocks(bool enable)
+{
+ void __iomem *clock_reg[] = {
+ PRCM_ACLK_MGT,
+ PRCM_DMACLK_MGT
+ };
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
+ u32 val;
+ u32 div;
+
+ val = readl(clock_reg[i]);
+ div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
+ if (enable) {
+ if ((div <= 1) || (div > 15)) {
+ pr_err("prcmu: Bad clock divider %d in %s\n",
+ div, __func__);
+ goto unlock_and_return;
+ }
+ div <<= 1;
+ } else {
+ if (div <= 2)
+ goto unlock_and_return;
+ div >>= 1;
+ }
+ val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
+ (div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
+ writel(val, clock_reg[i]);
+ }
+
+unlock_and_return:
+ /* Release the HW semaphore. */
+ writel(0, PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
/**
- * set_ape_opp - set the appropriate APE OPP
+ * db8500_set_ape_opp - set the appropriate APE OPP
* @opp: The new APE operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the APE.
*/
-int prcmu_set_ape_opp(u8 opp)
+int db8500_prcmu_set_ape_opp(u8 opp)
{
int r = 0;
+ if (opp == mb1_transfer.ape_opp)
+ return 0;
+
mutex_lock(&mb1_transfer.lock);
+ if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
+ request_even_slower_clocks(false);
+
+ if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
+ goto skip_message;
+
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
- writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
+ writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
+ (tcdm_base + PRCM_REQ_MB1_APE_OPP));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
@@ -962,17 +1034,24 @@ int prcmu_set_ape_opp(u8 opp)
(mb1_transfer.ack.ape_opp != opp))
r = -EIO;
+skip_message:
+ if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
+ (r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
+ request_even_slower_clocks(true);
+ if (!r)
+ mb1_transfer.ape_opp = opp;
+
mutex_unlock(&mb1_transfer.lock);
return r;
}
/**
- * prcmu_get_ape_opp - get the current APE OPP
+ * db8500_prcmu_get_ape_opp - get the current APE OPP
*
* Returns: the current APE OPP
*/
-int prcmu_get_ape_opp(void)
+int db8500_prcmu_get_ape_opp(void)
{
return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}
@@ -1375,7 +1454,7 @@ static int request_timclk(bool enable)
return 0;
}
-static int request_reg_clock(u8 clock, bool enable)
+static int request_clock(u8 clock, bool enable)
{
u32 val;
unsigned long flags;
@@ -1386,14 +1465,14 @@ static int request_reg_clock(u8 clock, bool enable)
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
+ val = readl(clk_mgt[clock].reg);
if (enable) {
val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
} else {
clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
}
- writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
+ writel(val, clk_mgt[clock].reg);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
@@ -1413,7 +1492,7 @@ static int request_sga_clock(u8 clock, bool enable)
writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
}
- ret = request_reg_clock(clock, enable);
+ ret = request_clock(clock, enable);
if (!ret && !enable) {
val = readl(PRCM_CGATING_BYPASS);
@@ -1446,10 +1525,228 @@ int db8500_prcmu_request_clock(u8 clock, bool enable)
break;
}
if (clock < PRCMU_NUM_REG_CLOCKS)
- return request_reg_clock(clock, enable);
+ return request_clock(clock, enable);
return -EINVAL;
}
+static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
+ int branch)
+{
+ u64 rate;
+ u32 val;
+ u32 d;
+ u32 div = 1;
+
+ val = readl(reg);
+
+ rate = src_rate;
+ rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);
+
+ d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
+ if (d > 1)
+ div *= d;
+
+ d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
+ if (d > 1)
+ div *= d;
+
+ if (val & PRCM_PLL_FREQ_SELDIV2)
+ div *= 2;
+
+ if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
+ (val & PRCM_PLL_FREQ_DIV2EN) &&
+ ((reg == PRCM_PLLSOC0_FREQ) ||
+ (reg == PRCM_PLLDDR_FREQ))))
+ div *= 2;
+
+ (void)do_div(rate, div);
+
+ return (unsigned long)rate;
+}
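
To make the divider logic above concrete, with purely hypothetical register field values of D = 50, N = 3, R = 1, no SELDIV2 and branch PLL_RAW, a 38.4 MHz source clock gives pll_rate() = 38400000 * 50 / 3 = 640000000, i.e. 640 MHz; with branch PLL_FIX the additional divide-by-two would yield 320 MHz.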
+
+#define ROOT_CLOCK_RATE 38400000
+
+static unsigned long clock_rate(u8 clock)
+{
+ u32 val;
+ u32 pllsw;
+ unsigned long rate = ROOT_CLOCK_RATE;
+
+ val = readl(clk_mgt[clock].reg);
+
+ if (val & PRCM_CLK_MGT_CLK38) {
+ if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
+ rate /= 2;
+ return rate;
+ }
+
+ val |= clk_mgt[clock].pllsw;
+ pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
+
+ if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
+ rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
+ else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
+ rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
+ else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
+ rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
+ else
+ return 0;
+
+ if ((clock == PRCMU_SGACLK) &&
+ (val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
+ u64 r = (rate * 10);
+
+ (void)do_div(r, 25);
+ return (unsigned long)r;
+ }
+ val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ if (val)
+ return rate / val;
+ else
+ return 0;
+}
+
+unsigned long prcmu_clock_rate(u8 clock)
+{
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ return clock_rate(clock);
+ else if (clock == PRCMU_TIMCLK)
+ return ROOT_CLOCK_RATE / 16;
+ else if (clock == PRCMU_SYSCLK)
+ return ROOT_CLOCK_RATE;
+ else if (clock == PRCMU_PLLSOC0)
+ return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
+ else if (clock == PRCMU_PLLSOC1)
+ return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
+ else if (clock == PRCMU_PLLDDR)
+ return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
+ else
+ return 0;
+}
+
+static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
+{
+ if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
+ return ROOT_CLOCK_RATE;
+ clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
+ if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
+ return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
+ else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
+ return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
+ else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
+ return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
+ else
+ return 0;
+}
+
+static u32 clock_divider(unsigned long src_rate, unsigned long rate)
+{
+ u32 div;
+
+ div = (src_rate / rate);
+ if (div == 0)
+ return 1;
+ if (rate < (src_rate / div))
+ div++;
+ if (div > 31)
+ div = 31;
+ return div;
+}
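
A quick worked example of the rounding behaviour, using made-up numbers: for src_rate = 640 MHz and a requested rate of 100 MHz, the initial divider is 640/100 = 6, but since 640/6 is about 106.7 MHz and exceeds the request, the divider is bumped to 7, giving roughly 91.4 MHz. In other words, apart from the divider cap of 31, the chosen rate never exceeds the requested one.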
+
+static long round_clock_rate(u8 clock, unsigned long rate)
+{
+ u32 val;
+ u32 div;
+ unsigned long src_rate;
+ long rounded_rate;
+
+ val = readl(clk_mgt[clock].reg);
+ src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
+ clk_mgt[clock].branch);
+ div = clock_divider(src_rate, rate);
+ if (val & PRCM_CLK_MGT_CLK38) {
+ if (clk_mgt[clock].clk38div) {
+ if (div > 2)
+ div = 2;
+ } else {
+ div = 1;
+ }
+ } else if ((clock == PRCMU_SGACLK) && (div == 3)) {
+ u64 r = (src_rate * 10);
+
+ (void)do_div(r, 25);
+ if (r <= rate)
+ return (unsigned long)r;
+ }
+ rounded_rate = (src_rate / div);
+
+ return rounded_rate;
+}
+
+long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ return round_clock_rate(clock, rate);
+ else
+ return (long)prcmu_clock_rate(clock);
+}
+
+static void set_clock_rate(u8 clock, unsigned long rate)
+{
+ u32 val;
+ u32 div;
+ unsigned long src_rate;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ val = readl(clk_mgt[clock].reg);
+ src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
+ clk_mgt[clock].branch);
+ div = clock_divider(src_rate, rate);
+ if (val & PRCM_CLK_MGT_CLK38) {
+ if (clk_mgt[clock].clk38div) {
+ if (div > 1)
+ val |= PRCM_CLK_MGT_CLK38DIV;
+ else
+ val &= ~PRCM_CLK_MGT_CLK38DIV;
+ }
+ } else if (clock == PRCMU_SGACLK) {
+ val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
+ PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
+ if (div == 3) {
+ u64 r = (src_rate * 10);
+
+ (void)do_div(r, 25);
+ if (r <= rate) {
+ val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
+ div = 0;
+ }
+ }
+ val |= div;
+ } else {
+ val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ val |= div;
+ }
+ writel(val, clk_mgt[clock].reg);
+
+ /* Release the HW semaphore. */
+ writel(0, PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
+int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ set_clock_rate(clock, rate);
+ return 0;
+}
+
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
@@ -1476,7 +1773,7 @@ int db8500_prcmu_config_esram0_deep_sleep(u8 state)
return 0;
}
-int prcmu_config_hotdog(u8 threshold)
+int db8500_prcmu_config_hotdog(u8 threshold)
{
mutex_lock(&mb4_transfer.lock);
@@ -1494,7 +1791,7 @@ int prcmu_config_hotdog(u8 threshold)
return 0;
}
-int prcmu_config_hotmon(u8 low, u8 high)
+int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
mutex_lock(&mb4_transfer.lock);
@@ -1533,7 +1830,7 @@ static int config_hot_period(u16 val)
return 0;
}
-int prcmu_start_temp_sense(u16 cycles32k)
+int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
if (cycles32k == 0xFFFF)
return -EINVAL;
@@ -1541,7 +1838,7 @@ int prcmu_start_temp_sense(u16 cycles32k)
return config_hot_period(cycles32k);
}
-int prcmu_stop_temp_sense(void)
+int db8500_prcmu_stop_temp_sense(void)
{
return config_hot_period(0xFFFF);
}
@@ -1596,16 +1893,8 @@ int prcmu_kick_a9wdog(u8 id)
/*
* timeout is 28 bit, in ms.
*/
-#define MAX_WATCHDOG_TIMEOUT 131000
int prcmu_load_a9wdog(u8 id, u32 timeout)
{
- if (timeout > MAX_WATCHDOG_TIMEOUT)
- /*
- * Due to calculation bug in prcmu fw, timeouts
- * can't be bigger than 131 seconds.
- */
- return -EINVAL;
-
return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
(id & A9WDOG_ID_MASK) |
/*
@@ -1619,41 +1908,6 @@ int prcmu_load_a9wdog(u8 id, u32 timeout)
}
/**
- * prcmu_set_clock_divider() - Configure the clock divider.
- * @clock: The clock for which the request is made.
- * @divider: The clock divider. (< 32)
- *
- * This function should only be used by the clock implementation.
- * Do not use it from any other place!
- */
-int prcmu_set_clock_divider(u8 clock, u8 divider)
-{
- u32 val;
- unsigned long flags;
-
- if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
- return -EINVAL;
-
- spin_lock_irqsave(&clk_mgt_lock, flags);
-
- /* Grab the HW semaphore. */
- while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
- cpu_relax();
-
- val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
- val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
- val |= (u32)divider;
- writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
-
- /* Release the HW semaphore. */
- writel(0, PRCM_SEM);
-
- spin_unlock_irqrestore(&clk_mgt_lock, flags);
-
- return 0;
-}
-
-/**
* prcmu_abb_read() - Read register value(s) from the ABB.
* @slave: The I2C slave address.
* @reg: The (start) register address.
@@ -2102,10 +2356,7 @@ static struct irq_chip prcmu_irq_chip = {
void __init db8500_prcmu_early_init(void)
{
unsigned int i;
-
- if (cpu_is_u8500v1()) {
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
- } else if (cpu_is_u8500v2()) {
+ if (cpu_is_u8500v2()) {
void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
if (tcpm_base != NULL) {
@@ -2133,6 +2384,7 @@ void __init db8500_prcmu_early_init(void)
init_completion(&mb0_transfer.ac_wake_work);
mutex_init(&mb1_transfer.lock);
init_completion(&mb1_transfer.work);
+ mb1_transfer.ape_opp = APE_NO_CHANGE;
mutex_init(&mb2_transfer.lock);
init_completion(&mb2_transfer.work);
spin_lock_init(&mb2_transfer.auto_pm_lock);
@@ -2184,24 +2436,20 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("vcore", "sdi4"),
REGULATOR_SUPPLY("v-dma", "dma40.0"),
REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
- /* "v-uart" changed to "vcore" in the mainline kernel */
- REGULATOR_SUPPLY("vcore", "uart0"),
- REGULATOR_SUPPLY("vcore", "uart1"),
- REGULATOR_SUPPLY("vcore", "uart2"),
+ REGULATOR_SUPPLY("v-uart", "uart0"),
+ REGULATOR_SUPPLY("v-uart", "uart1"),
+ REGULATOR_SUPPLY("v-uart", "uart2"),
REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
};
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
- /* CG2900 and CW1200 power to off-chip peripherals */
- REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
- REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
/* AV8100 regulator */
REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
- REGULATOR_SUPPLY("vsupply", "b2r2.0"),
+ REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
REGULATOR_SUPPLY("vsupply", "mcde"),
};
@@ -2238,6 +2486,7 @@ static struct regulator_consumer_supply db8500_esram12_consumers[] = {
static struct regulator_consumer_supply db8500_esram34_consumers[] = {
REGULATOR_SUPPLY("v-esram34", "mcde"),
REGULATOR_SUPPLY("esram34", "cm_control"),
+ REGULATOR_SUPPLY("lcla_esram", "dma40.0"),
};
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index ec22e9f15d3..2da628d11db 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -17,41 +17,41 @@
#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
-#define PRCM_SVACLK_MGT_OFF 0x008
-#define PRCM_SIACLK_MGT_OFF 0x00C
-#define PRCM_SGACLK_MGT_OFF 0x014
-#define PRCM_UARTCLK_MGT_OFF 0x018
-#define PRCM_MSP02CLK_MGT_OFF 0x01C
-#define PRCM_I2CCLK_MGT_OFF 0x020
-#define PRCM_SDMMCCLK_MGT_OFF 0x024
-#define PRCM_SLIMCLK_MGT_OFF 0x028
-#define PRCM_PER1CLK_MGT_OFF 0x02C
-#define PRCM_PER2CLK_MGT_OFF 0x030
-#define PRCM_PER3CLK_MGT_OFF 0x034
-#define PRCM_PER5CLK_MGT_OFF 0x038
-#define PRCM_PER6CLK_MGT_OFF 0x03C
-#define PRCM_PER7CLK_MGT_OFF 0x040
-#define PRCM_PWMCLK_MGT_OFF 0x044 /* for DB5500 */
-#define PRCM_IRDACLK_MGT_OFF 0x048 /* for DB5500 */
-#define PRCM_IRRCCLK_MGT_OFF 0x04C /* for DB5500 */
-#define PRCM_LCDCLK_MGT_OFF 0x044
-#define PRCM_BMLCLK_MGT_OFF 0x04C
-#define PRCM_HSITXCLK_MGT_OFF 0x050
-#define PRCM_HSIRXCLK_MGT_OFF 0x054
-#define PRCM_HDMICLK_MGT_OFF 0x058
-#define PRCM_APEATCLK_MGT_OFF 0x05C
-#define PRCM_APETRACECLK_MGT_OFF 0x060
-#define PRCM_MCDECLK_MGT_OFF 0x064
-#define PRCM_IPI2CCLK_MGT_OFF 0x068
-#define PRCM_DSIALTCLK_MGT_OFF 0x06C
-#define PRCM_DMACLK_MGT_OFF 0x074
-#define PRCM_B2R2CLK_MGT_OFF 0x078
-#define PRCM_TVCLK_MGT_OFF 0x07C
-#define PRCM_UNIPROCLK_MGT_OFF 0x278
-#define PRCM_SSPCLK_MGT_OFF 0x280
-#define PRCM_RNGCLK_MGT_OFF 0x284
-#define PRCM_UICCCLK_MGT_OFF 0x27C
-#define PRCM_MSP1CLK_MGT_OFF 0x288
+#define PRCM_CLK_MGT(_offset) (void __iomem *)(IO_ADDRESS(U8500_PRCMU_BASE) \
+ + _offset)
+#define PRCM_ACLK_MGT PRCM_CLK_MGT(0x004)
+#define PRCM_SVACLK_MGT PRCM_CLK_MGT(0x008)
+#define PRCM_SIACLK_MGT PRCM_CLK_MGT(0x00C)
+#define PRCM_SGACLK_MGT PRCM_CLK_MGT(0x014)
+#define PRCM_UARTCLK_MGT PRCM_CLK_MGT(0x018)
+#define PRCM_MSP02CLK_MGT PRCM_CLK_MGT(0x01C)
+#define PRCM_I2CCLK_MGT PRCM_CLK_MGT(0x020)
+#define PRCM_SDMMCCLK_MGT PRCM_CLK_MGT(0x024)
+#define PRCM_SLIMCLK_MGT PRCM_CLK_MGT(0x028)
+#define PRCM_PER1CLK_MGT PRCM_CLK_MGT(0x02C)
+#define PRCM_PER2CLK_MGT PRCM_CLK_MGT(0x030)
+#define PRCM_PER3CLK_MGT PRCM_CLK_MGT(0x034)
+#define PRCM_PER5CLK_MGT PRCM_CLK_MGT(0x038)
+#define PRCM_PER6CLK_MGT PRCM_CLK_MGT(0x03C)
+#define PRCM_PER7CLK_MGT PRCM_CLK_MGT(0x040)
+#define PRCM_LCDCLK_MGT PRCM_CLK_MGT(0x044)
+#define PRCM_BMLCLK_MGT PRCM_CLK_MGT(0x04C)
+#define PRCM_HSITXCLK_MGT PRCM_CLK_MGT(0x050)
+#define PRCM_HSIRXCLK_MGT PRCM_CLK_MGT(0x054)
+#define PRCM_HDMICLK_MGT PRCM_CLK_MGT(0x058)
+#define PRCM_APEATCLK_MGT PRCM_CLK_MGT(0x05C)
+#define PRCM_APETRACECLK_MGT PRCM_CLK_MGT(0x060)
+#define PRCM_MCDECLK_MGT PRCM_CLK_MGT(0x064)
+#define PRCM_IPI2CCLK_MGT PRCM_CLK_MGT(0x068)
+#define PRCM_DSIALTCLK_MGT PRCM_CLK_MGT(0x06C)
+#define PRCM_DMACLK_MGT PRCM_CLK_MGT(0x074)
+#define PRCM_B2R2CLK_MGT PRCM_CLK_MGT(0x078)
+#define PRCM_TVCLK_MGT PRCM_CLK_MGT(0x07C)
+#define PRCM_UNIPROCLK_MGT PRCM_CLK_MGT(0x278)
+#define PRCM_SSPCLK_MGT PRCM_CLK_MGT(0x280)
+#define PRCM_RNGCLK_MGT PRCM_CLK_MGT(0x284)
+#define PRCM_UICCCLK_MGT PRCM_CLK_MGT(0x27C)
+#define PRCM_MSP1CLK_MGT PRCM_CLK_MGT(0x288)
#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f
@@ -132,13 +132,21 @@
#define PRCM_MMIP_LS_CLAMP_CLR (_PRCMU_BASE + 0x424)
/* PRCMU clock/PLL/reset registers */
+#define PRCM_PLLSOC0_FREQ (_PRCMU_BASE + 0x080)
+#define PRCM_PLLSOC1_FREQ (_PRCMU_BASE + 0x084)
+#define PRCM_PLLDDR_FREQ (_PRCMU_BASE + 0x08C)
+#define PRCM_PLL_FREQ_D_SHIFT 0
+#define PRCM_PLL_FREQ_D_MASK BITS(0, 7)
+#define PRCM_PLL_FREQ_N_SHIFT 8
+#define PRCM_PLL_FREQ_N_MASK BITS(8, 13)
+#define PRCM_PLL_FREQ_R_SHIFT 16
+#define PRCM_PLL_FREQ_R_MASK BITS(16, 18)
+#define PRCM_PLL_FREQ_SELDIV2 BIT(24)
+#define PRCM_PLL_FREQ_DIV2EN BIT(25)
+
#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
-#define PRCM_LCDCLK_MGT (_PRCMU_BASE + PRCM_LCDCLK_MGT_OFF)
-#define PRCM_MCDECLK_MGT (_PRCMU_BASE + PRCM_MCDECLK_MGT_OFF)
-#define PRCM_HDMICLK_MGT (_PRCMU_BASE + PRCM_HDMICLK_MGT_OFF)
-#define PRCM_TVCLK_MGT (_PRCMU_BASE + PRCM_TVCLK_MGT_OFF)
#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
@@ -183,9 +191,15 @@
#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24)
#define PRCM_CLKOCR_CLK1TYPE BIT(28)
-#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
-#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
-#define PRCM_CLK_MGT_CLKEN BIT(8)
+#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
+#define PRCM_CLK_MGT_CLKPLLSW_SOC0 BIT(5)
+#define PRCM_CLK_MGT_CLKPLLSW_SOC1 BIT(6)
+#define PRCM_CLK_MGT_CLKPLLSW_DDR BIT(7)
+#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
+#define PRCM_CLK_MGT_CLKEN BIT(8)
+#define PRCM_CLK_MGT_CLK38 BIT(9)
+#define PRCM_CLK_MGT_CLK38DIV BIT(11)
+#define PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN BIT(12)
/* GPIOCR register */
#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 2963689cf45..f9abf20975b 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -674,7 +674,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
-
+back:
for (i = 0; i < num; i++) {
int bank = num - i - 1;
u8 status = isr[i];
@@ -696,6 +696,22 @@ static irqreturn_t stmpe_irq(int irq, void *data)
stmpe_reg_write(stmpe, israddr + i, clear);
}
+ /*
+ * It may happen that interrupt sources do not show up on the
+ * first status read, so read one more time.
+ */
+ ret = stmpe_block_read(stmpe, israddr, num, isr);
+ if (ret >= 0) {
+ for (i = 0; i < num; i++) {
+ int bank = num - i - 1;
+ u8 status = isr[i];
+
+ status &= stmpe->ier[bank];
+ if (status)
+ goto back;
+ }
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 00000000000..91211f29623
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tc35892.h>
+
+#define TC35892_CLKMODE_MODCTL_SLEEP 0x0
+#define TC35892_CLKMODE_MODCTL_OPERATION (1 << 0)
+
+/**
+ * tc35892_reg_read() - read a single TC35892 register
+ * @tc35892: Device to read from
+ * @reg: Register to read
+ */
+int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_read);
+
+/**
+ * tc35892_reg_write() - write a single TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @data: Value to write
+ */
+int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_write);
+
+/**
+ * tc35892_block_read() - read multiple TC35892 registers
+ * @tc35892: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_read);
+
+/**
+ * tc35892_block_write() - write multiple TC35892 registers
+ * @tc35892: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_write);
+
+/**
+ * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to set
+ * @val: Value to set
+ */
+int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&tc35892->lock);
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = tc35892_reg_write(tc35892, reg, ret);
+
+out:
+ mutex_unlock(&tc35892->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_set_bits);
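
The four helpers above form the register-access API used by the rest of this driver and by the tc35892-gpio cell. A minimal caller might look like the sketch below; the register and bit names are the ones from <linux/mfd/tc35892.h> already used in this file, while the wrapper function itself is invented purely for illustration.

/* Illustrative sketch only, not part of the driver. */
static int example_release_kbd(struct tc35892 *tc35892)
{
	int ret;

	/* Clear the keypad reset bit in RSTCTRL. */
	ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL,
			       TC35892_RSTCTRL_KBDRST, 0);
	if (ret < 0)
		return ret;

	/* Read back the register; a negative value indicates an I2C error. */
	return tc35892_reg_read(tc35892, TC35892_RSTCTRL);
}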
+
+static struct resource gpio_resources[] = {
+ {
+ .start = TC35892_INT_GPIIRQ,
+ .end = TC35892_INT_GPIIRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tc35892_devs[] = {
+ {
+ .name = "tc35892-gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = &gpio_resources[0],
+ },
+};
+
+static irqreturn_t tc35892_irq(int irq, void *data)
+{
+ struct tc35892 *tc35892 = data;
+ int status;
+
+again:
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status < 0)
+ return IRQ_NONE;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ handle_nested_irq(tc35892->irq_base + bit);
+ status &= ~(1 << bit);
+ }
+
+ /*
+ * A dummy read or write (to any register) appears to be necessary to
+ * have the last interrupt clear (for example, GPIO IC write) take
+ * effect. In such a case, recheck for any interrupt which is still
+ * pending.
+ */
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status)
+ goto again;
+
+ return IRQ_HANDLED;
+}
+
+static void tc35892_irq_dummy(struct irq_data *d)
+{
+ /* No mask/unmask at this level */
+}
+
+static struct irq_chip tc35892_irq_chip = {
+ .name = "tc35892",
+ .irq_mask = tc35892_irq_dummy,
+ .irq_unmask = tc35892_irq_dummy,
+};
+
+static int tc35892_irq_init(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+ irq_set_chip_data(irq, tc35892);
+ irq_set_chip_and_handler(irq, &tc35892_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_irq_remove(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+ }
+}
+
+static int tc35892_chip_init(struct tc35892 *tc35892)
+{
+ int manf, ver, ret;
+
+ manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
+ if (manf < 0)
+ return manf;
+
+ ver = tc35892_reg_read(tc35892, TC35892_VERSION);
+ if (ver < 0)
+ return ver;
+
+ if (manf != TC35892_MANFCODE_MAGIC) {
+ dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
+ return -EINVAL;
+ }
+
+ dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
+
+ /*
+ * Put everything except the IRQ module into reset;
+ * also spare the GPIO module for any pin initialization
+ * done during pre-kernel boot
+ */
+ ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_TIMRST
+ | TC35892_RSTCTRL_ROTRST
+ | TC35892_RSTCTRL_KBDRST);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the reset interrupt. */
+ return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
+}
+
+static int __devinit tc35892_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tc35892_platform_data *pdata = i2c->dev.platform_data;
+ struct tc35892 *tc35892;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
+ if (!tc35892)
+ return -ENOMEM;
+
+ mutex_init(&tc35892->lock);
+
+ tc35892->dev = &i2c->dev;
+ tc35892->i2c = i2c;
+ tc35892->pdata = pdata;
+ tc35892->irq_base = pdata->irq_base;
+ tc35892->num_gpio = id->driver_data;
+
+ i2c_set_clientdata(i2c, tc35892);
+
+ ret = tc35892_chip_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = tc35892_irq_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tc35892", tc35892);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
+ ARRAY_SIZE(tc35892_devs), NULL,
+ tc35892->irq_base);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to add children\n");
+ goto out_freeirq;
+ }
+
+ return 0;
+
+out_freeirq:
+ free_irq(tc35892->i2c->irq, tc35892);
+out_removeirq:
+ tc35892_irq_remove(tc35892);
+out_free:
+ kfree(tc35892);
+ return ret;
+}
+
+static int __devexit tc35892_remove(struct i2c_client *client)
+{
+ struct tc35892 *tc35892 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(tc35892->dev);
+
+ free_irq(tc35892->i2c->irq, tc35892);
+ tc35892_irq_remove(tc35892);
+
+ kfree(tc35892);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static u32 sleep_regs[] = {
+ TC35892_IOPC0_L,
+ TC35892_IOPC0_H,
+ TC35892_IOPC1_L,
+ TC35892_IOPC1_H,
+ TC35892_IOPC2_L,
+ TC35892_IOPC2_H,
+ TC35892_DRIVE0_L,
+ TC35892_DRIVE0_H,
+ TC35892_DRIVE1_L,
+ TC35892_DRIVE1_H,
+ TC35892_DRIVE2_L,
+ TC35892_DRIVE2_H,
+ TC35892_DRIVE3,
+ TC35892_GPIODATA0,
+ TC35892_GPIOMASK0,
+ TC35892_GPIODATA1,
+ TC35892_GPIOMASK1,
+ TC35892_GPIODATA2,
+ TC35892_GPIOMASK2,
+ TC35892_GPIODIR0,
+ TC35892_GPIODIR1,
+ TC35892_GPIODIR2,
+ TC35892_GPIOIE0,
+ TC35892_GPIOIE1,
+ TC35892_GPIOIE2,
+ TC35892_RSTCTRL,
+ TC35892_CLKCFG,
+};
+
+static u8 sleep_regs_val[] = {
+ 0x00, /* TC35892_IOPC0_L */
+ 0x00, /* TC35892_IOPC0_H */
+ 0x00, /* TC35892_IOPC1_L */
+ 0x00, /* TC35892_IOPC1_H */
+ 0x00, /* TC35892_IOPC2_L */
+ 0x00, /* TC35892_IOPC2_H */
+ 0xff, /* TC35892_DRIVE0_L */
+ 0xff, /* TC35892_DRIVE0_H */
+ 0xff, /* TC35892_DRIVE1_L */
+ 0xff, /* TC35892_DRIVE1_H */
+ 0xff, /* TC35892_DRIVE2_L */
+ 0xff, /* TC35892_DRIVE2_H */
+ 0x0f, /* TC35892_DRIVE3 */
+ 0x80, /* TC35892_GPIODATA0 */
+ 0x80, /* TC35892_GPIOMASK0 */
+ 0x80, /* TC35892_GPIODATA1 */
+ 0x80, /* TC35892_GPIOMASK1 */
+ 0x06, /* TC35892_GPIODATA2 */
+ 0x06, /* TC35892_GPIOMASK2 */
+ 0xf0, /* TC35892_GPIODIR0 */
+ 0xe0, /* TC35892_GPIODIR1 */
+ 0xee, /* TC35892_GPIODIR2 */
+ 0x0f, /* TC35892_GPIOIE0 */
+ 0x1f, /* TC35892_GPIOIE1 */
+ 0x11, /* TC35892_GPIOIE2 */
+ 0x0f, /* TC35892_RSTCTRL */
+ 0xb0 /* TC35892_CLKCFG */
+};
+
+static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)];
+
+static int tc35892_suspend(struct device *dev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(dev);
+ struct i2c_client *client = tc35892->i2c;
+ int ret = 0;
+ int i, j;
+ int val;
+
+ /* Put the system into sleep mode */
+ if (!device_may_wakeup(&client->dev)) {
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ val = tc35892_reg_read(tc35892,
+ sleep_regs[i]);
+ if (val < 0)
+ goto out;
+
+ sleep_regs_backup[i] = (u8) (val & 0xff);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[i],
+ sleep_regs_val[i]);
+ if (ret < 0)
+ goto fail;
+
+ }
+
+ ret = tc35892_reg_write(tc35892,
+ TC35892_CLKMODE,
+ TC35892_CLKMODE_MODCTL_SLEEP);
+ }
+out:
+ return ret;
+fail:
+ for (j = 0; j <= i; j++) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[j],
+ sleep_regs_backup[j]);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static int tc35892_resume(struct device *dev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(dev);
+ struct i2c_client *client = tc35892->i2c;
+ int ret = 0;
+ int i;
+
+ /* Bring the system back into operation mode */
+ if (!device_may_wakeup(&client->dev)) {
+ ret = tc35892_reg_write(tc35892,
+ TC35892_CLKMODE,
+ TC35892_CLKMODE_MODCTL_OPERATION);
+ if (ret < 0)
+ goto out;
+
+ for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[i],
+ sleep_regs_backup[i]);
+ /* Not much to do here if we fail */
+ if (ret < 0)
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
+static const struct dev_pm_ops tc35892_dev_pm_ops = {
+ .suspend = tc35892_suspend,
+ .resume = tc35892_resume,
+};
+#endif
+
+static const struct i2c_device_id tc35892_id[] = {
+ { "tc35892", 24 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35892_id);
+
+static struct i2c_driver tc35892_driver = {
+ .driver.name = "tc35892",
+ .driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &tc35892_dev_pm_ops,
+#endif
+ .probe = tc35892_probe,
+ .remove = __devexit_p(tc35892_remove),
+ .id_table = tc35892_id,
+};
+
+static int __init tc35892_init(void)
+{
+ return i2c_add_driver(&tc35892_driver);
+}
+subsys_initcall(tc35892_init);
+
+static void __exit tc35892_exit(void)
+{
+ i2c_del_driver(&tc35892_driver);
+}
+module_exit(tc35892_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 MFD core driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index de979742c6f..0e79fe2d214 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -358,16 +358,114 @@ static int __devexit tc3589x_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
+
+static u32 sleep_regs[] = {
+ TC3589x_IOPC0_L,
+ TC3589x_IOPC0_H,
+ TC3589x_IOPC1_L,
+ TC3589x_IOPC1_H,
+ TC3589x_IOPC2_L,
+ TC3589x_IOPC2_H,
+ TC3589x_DRIVE0_L,
+ TC3589x_DRIVE0_H,
+ TC3589x_DRIVE1_L,
+ TC3589x_DRIVE1_H,
+ TC3589x_DRIVE2_L,
+ TC3589x_DRIVE2_H,
+ TC3589x_DRIVE3,
+ TC3589x_GPIODATA0,
+ TC3589x_GPIOMASK0,
+ TC3589x_GPIODATA1,
+ TC3589x_GPIOMASK1,
+ TC3589x_GPIODATA2,
+ TC3589x_GPIOMASK2,
+ TC3589x_GPIODIR0,
+ TC3589x_GPIODIR1,
+ TC3589x_GPIODIR2,
+ TC3589x_GPIOIE0,
+ TC3589x_GPIOIE1,
+ TC3589x_GPIOIE2,
+ TC3589x_RSTCTRL,
+ TC3589x_CLKCFG,
+};
+
+static u8 sleep_regs_val[] = {
+ 0x00, /* TC3589x_IOPC0_L */
+ 0x00, /* TC3589x_IOPC0_H */
+ 0x00, /* TC3589x_IOPC1_L */
+ 0x00, /* TC3589x_IOPC1_H */
+ 0x00, /* TC3589x_IOPC2_L */
+ 0x00, /* TC3589x_IOPC2_H */
+ 0xff, /* TC3589x_DRIVE0_L */
+ 0xff, /* TC3589x_DRIVE0_H */
+ 0xff, /* TC3589x_DRIVE1_L */
+ 0xff, /* TC3589x_DRIVE1_H */
+ 0xff, /* TC3589x_DRIVE2_L */
+ 0xff, /* TC3589x_DRIVE2_H */
+ 0x0f, /* TC3589x_DRIVE3 */
+ 0x80, /* TC3589x_GPIODATA0 */
+ 0x80, /* TC3589x_GPIOMASK0 */
+ 0x80, /* TC3589x_GPIODATA1 */
+ 0x80, /* TC3589x_GPIOMASK1 */
+ 0x06, /* TC3589x_GPIODATA2 */
+ 0x06, /* TC3589x_GPIOMASK2 */
+ 0xf0, /* TC3589x_GPIODIR0 */
+ 0xe0, /* TC3589x_GPIODIR1 */
+ 0xee, /* TC3589x_GPIODIR2 */
+ 0x0f, /* TC3589x_GPIOIE0 */
+ 0x1f, /* TC3589x_GPIOIE1 */
+ 0x11, /* TC3589x_GPIOIE2 */
+ 0x0f, /* TC3589x_RSTCTRL */
+ 0xb0 /* TC3589x_CLKCFG */
+};
+
+static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)];
+
static int tc3589x_suspend(struct device *dev)
{
struct tc3589x *tc3589x = dev_get_drvdata(dev);
struct i2c_client *client = tc3589x->i2c;
int ret = 0;
+ int i, j;
+ int val;
+
+ /* Put the system into sleep mode */
+ if (!device_may_wakeup(&client->dev)) {
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ val = tc3589x_reg_read(tc3589x,
+ sleep_regs[i]);
+ if (val < 0)
+ goto out;
+
+ sleep_regs_backup[i] = (u8) (val & 0xff);
+ }
- /* put the system to sleep mode */
- if (!device_may_wakeup(&client->dev))
- ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
- TC3589x_CLKMODE_MODCTL_SLEEP);
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[i],
+ sleep_regs_val[i]);
+ if (ret < 0)
+ goto fail;
+
+ }
+
+ ret = tc3589x_reg_write(tc3589x,
+ TC3589x_CLKMODE,
+ TC3589x_CLKMODE_MODCTL_SLEEP);
+ } else {
+ enable_irq_wake(client->irq);
+ }
+out:
+ return ret;
+fail:
+ for (j = 0; j <= i; j++) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[j],
+ sleep_regs_backup[j]);
+ if (ret < 0)
+ break;
+ }
return ret;
}
@@ -377,12 +475,29 @@ static int tc3589x_resume(struct device *dev)
struct tc3589x *tc3589x = dev_get_drvdata(dev);
struct i2c_client *client = tc3589x->i2c;
int ret = 0;
+ int i;
- /* enable the system into operation */
+ /* Bring the system back into operation mode */
if (!device_may_wakeup(&client->dev))
- ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
- TC3589x_CLKMODE_MODCTL_OPERATION);
-
+ {
+ ret = tc3589x_reg_write(tc3589x,
+ TC3589x_CLKMODE,
+ TC3589x_CLKMODE_MODCTL_OPERATION);
+ if (ret < 0)
+ goto out;
+
+ for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[i],
+ sleep_regs_backup[i]);
+ /* Not much to do here if we fail */
+ if (ret < 0)
+ break;
+ }
+ } else {
+ disable_irq_wake(client->irq);
+ }
+out:
return ret;
}
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index a293b978e27..d7b9e0c60ea 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -195,6 +195,7 @@ static int __devinit tps6105x_probe(struct i2c_client *client,
return 0;
fail:
+ i2c_set_clientdata(client, NULL);
kfree(tps6105x);
return ret;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6c39050976f..a013598fcfd 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -471,6 +471,15 @@ config BMP085
To compile this driver as a module, choose M here: the
module will be called bmp085.
+config DISPDEV
+ bool "Display overlay device"
+ depends on FB_MCDE
+ default n
+ help
+ This driver provides a way to use a second overlay for a display (in
+ addition to the framebuffer). The device allows for registration of
+ userspace buffers to be used with the overlay.
+
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
depends on PCI
@@ -491,6 +500,14 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
+config HWMEM
+ bool "Hardware memory driver"
+ default n
+ help
+ This driver provides a way to allocate contiguous system memory which
+ can be used by hardware. It also enables accessing hwmem allocated
+ memory buffers through a secure id which can be shared across processes.
+
config USB_SWITCH_FSA9480
tristate "FSA9480 USB Switch"
depends on I2C
@@ -500,6 +517,7 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+source "drivers/misc/Kconfig.stm"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Kconfig.stm b/drivers/misc/Kconfig.stm
new file mode 100644
index 00000000000..ef2b94683a3
--- /dev/null
+++ b/drivers/misc/Kconfig.stm
@@ -0,0 +1,114 @@
+menuconfig STM_TRACE
+ bool "STM MIPI Trace driver"
+ depends on ARCH_U8500
+ help
+ Simple System Trace Module driver. It allows the STM to be used and
+ configured, either from kernel space or from user space.
+
+if STM_TRACE
+
+config STM_NUMBER_OF_CHANNEL
+ int
+ default 512 if ARCH_U8500
+ default 256
+ help
+ Maximum number of channels; always a multiple of 256.
+
+config STM_PRINTK
+ bool "printk support"
+ depends on STM_TRACE
+ help
+ Duplicate printk output on the STM printk channel and activate stm_printk
+
+config STM_PRINTK_CHANNEL
+ int "printk channel"
+ range 0 255
+ depends on STM_PRINTK
+ default 255
+ help
+ STM printk channel number
+
+config STM_FTRACE
+ bool "functions tracing"
+ depends on FTRACE
+ default y
+ help
+ Output function tracing on STM dedicated channel
+
+config STM_FTRACE_CHANNEL
+ int "ftrace channel"
+ range 0 255
+ depends on STM_FTRACE
+ default 254
+ help
+ STM ftrace channel number
+
+config STM_CTX_SWITCH
+ bool "Context switch tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler context switch on STM dedicated channel
+
+config STM_CTX_SWITCH_CHANNEL
+ int "Context switch channel"
+ range 0 255
+ depends on STM_CTX_SWITCH
+ default 253
+ help
+ STM Context switch channel number
+
+config STM_WAKEUP
+ bool "Scheduler wakeup tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler wakeup on STM dedicated channel
+
+config STM_WAKEUP_CHANNEL
+ int "Wakeup channel"
+ range 0 255
+ depends on STM_WAKEUP
+ default 252
+ help
+ STM scheduler wakeup channel number
+
+config STM_STACK_TRACE
+ bool "Stack tracing"
+ depends on STACKTRACE
+ default y
+ help
+ Output stack tracing on STM dedicated channel
+
+config STM_STACK_TRACE_CHANNEL
+ int "Stack trace channel"
+ range 0 255
+ depends on STM_STACK_TRACE
+ default 251
+ help
+ STM stack trace channel number
+
+config STM_TRACE_PRINTK
+ bool "trace printk & binary printk support"
+ depends on TRACING
+ default y
+ help
+ Duplicate trace_printk and binary printk output on dedicated STM channels
+
+config STM_TRACE_PRINTK_CHANNEL
+ int "trace_printk channel"
+ range 0 255
+ depends on TRACING
+ default 250
+ help
+ STM trace_printk channel number
+
+config STM_TRACE_BPRINTK_CHANNEL
+ int "trace_bprintk channel"
+ range 0 255
+ depends on TRACING
+ default 249
+ help
+ STM trace binary printk channel number
+
+endif
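
For reference, a kernel configuration that turns on the STM trace driver together with the printk and ftrace channels, using the default channel numbers documented above, would contain a fragment along these lines (the values are simply the stated defaults, not a recommendation):

CONFIG_STM_TRACE=y
CONFIG_STM_NUMBER_OF_CHANNEL=512
CONFIG_STM_PRINTK=y
CONFIG_STM_PRINTK_CHANNEL=255
CONFIG_STM_FTRACE=y
CONFIG_STM_FTRACE_CHANNEL=254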
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 05237e82565..db9f716a3c1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,9 @@ obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
+obj-$(CONFIG_STM_TRACE) += stm.o
+obj-$(CONFIG_HWMEM) += hwmem/
+obj-$(CONFIG_DISPDEV) += dispdev/
obj-$(CONFIG_STM_I2S) += i2s/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 2208a9d5262..7cae999889f 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -8,6 +8,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pwm.h>
+#include <linux/clk.h>
#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/module.h>
@@ -27,8 +28,10 @@
struct pwm_device {
struct device *dev;
struct list_head node;
+ struct clk *clk;
const char *label;
unsigned int pwm_id;
+ bool clk_enabled;
};
static LIST_HEAD(pwm_list);
@@ -67,9 +70,17 @@ int pwm_enable(struct pwm_device *pwm)
{
int ret;
+ if (!pwm->clk_enabled) {
+ ret = clk_enable(pwm->clk);
+ if (ret < 0) {
+ dev_err(pwm->dev, "failed to enable clock\n");
+ return ret;
+ }
+ pwm->clk_enabled = true;
+ }
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (pwm->pwm_id-1), ENABLE_PWM);
+ 1 << (pwm->pwm_id-1), 1 << (pwm->pwm_id-1));
if (ret < 0)
dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
@@ -84,9 +95,27 @@ void pwm_disable(struct pwm_device *pwm)
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << (pwm->pwm_id-1), DISABLE_PWM);
+ /*
+ * Workaround to make sure the PWM is really disabled.
+ * If the enable bit is not toggled, the PWM might output a 50/50
+ * duty cycle even though it should be disabled.
+ */
+ ret &= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1),
+ ENABLE_PWM << (pwm->pwm_id-1));
+ ret &= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), DISABLE_PWM);
+
if (ret < 0)
dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
+ if (pwm->clk_enabled) {
+ clk_disable(pwm->clk);
+ pwm->clk_enabled = false;
+ }
+
return;
}
EXPORT_SYMBOL(pwm_disable);
@@ -116,6 +145,8 @@ EXPORT_SYMBOL(pwm_free);
static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
{
struct pwm_device *pwm;
+ int ret = 0;
+
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
@@ -129,14 +160,24 @@ static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
pwm->pwm_id = pdev->id;
list_add_tail(&pwm->node, &pwm_list);
platform_set_drvdata(pdev, pwm);
+
+ pwm->clk = clk_get(pwm->dev, NULL);
+ if (IS_ERR(pwm->clk)) {
+ dev_err(pwm->dev, "clock request failed\n");
+ ret = PTR_ERR(pwm->clk);
+ kfree(pwm);
+ return ret;
+ }
+ pwm->clk_enabled = false;
dev_dbg(pwm->dev, "pwm probe successful\n");
- return 0;
+ return ret;
}
static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
{
struct pwm_device *pwm = platform_get_drvdata(pdev);
list_del(&pwm->node);
+ clk_put(pwm->clk);
dev_dbg(&pdev->dev, "pwm driver removed\n");
kfree(pwm);
return 0;
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index bfeea9ba702..ec303edd94a 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -18,11 +18,17 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/i2c.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
#define BH1780_REG_CONTROL 0x80
#define BH1780_REG_PARTID 0x8A
@@ -40,11 +46,20 @@
struct bh1780_data {
struct i2c_client *client;
+ struct regulator *regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
int power_state;
/* lock for sysfs operations */
struct mutex lock;
};
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void bh1780_early_suspend(struct early_suspend *ddata);
+static void bh1780_late_resume(struct early_suspend *ddata);
+#endif
+
static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
{
int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
@@ -72,6 +87,9 @@ static ssize_t bh1780_show_lux(struct device *dev,
struct bh1780_data *ddata = platform_get_drvdata(pdev);
int lsb, msb;
+ if (ddata->power_state == BH1780_POFF)
+ return -EINVAL;
+
lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
if (lsb < 0)
return lsb;
@@ -89,13 +107,9 @@ static ssize_t bh1780_show_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- int state;
-
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
- return sprintf(buf, "%d\n", state & BH1780_POWMASK);
+ /* we already maintain a sw state */
+ return sprintf(buf, "%d\n", ddata->power_state);
}
static ssize_t bh1780_store_power_state(struct device *dev,
@@ -104,7 +118,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- unsigned long val;
+ long val;
int error;
error = strict_strtoul(buf, 0, &val);
@@ -114,14 +128,24 @@ static ssize_t bh1780_store_power_state(struct device *dev,
if (val < BH1780_POFF || val > BH1780_PON)
return -EINVAL;
+ if (ddata->power_state == val)
+ return count;
+
mutex_lock(&ddata->lock);
+ if (ddata->power_state == BH1780_POFF)
+ regulator_enable(ddata->regulator);
+
error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
if (error < 0) {
mutex_unlock(&ddata->lock);
+ regulator_disable(ddata->regulator);
return error;
}
+ if (val == BH1780_POFF)
+ regulator_disable(ddata->regulator);
+
msleep(BH1780_PON_DELAY);
ddata->power_state = val;
mutex_unlock(&ddata->lock);
@@ -131,7 +155,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(power_state, S_IWUGO | S_IRUGO,
bh1780_show_power_state, bh1780_store_power_state);
static struct attribute *bh1780_attributes[] = {
@@ -153,21 +177,42 @@ static int __devinit bh1780_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
ret = -EIO;
- goto err_op_failed;
+ return ret;
}
ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
if (ddata == NULL) {
+ dev_err(&client->dev, "failed to alloc ddata\n");
ret = -ENOMEM;
- goto err_op_failed;
+ return ret;
}
ddata->client = client;
i2c_set_clientdata(client, ddata);
+ ddata->regulator = regulator_get(&client->dev, "vcc");
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ goto free_ddata;
+ }
+
+ regulator_enable(ddata->regulator);
+
ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
- if (ret < 0)
- goto err_op_failed;
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read part ID\n");
+ goto put_regulator;
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = bh1780_early_suspend;
+ ddata->early_suspend.resume = bh1780_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ regulator_disable(ddata->regulator);
+ ddata->power_state = BH1780_POFF;
dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
(ret & BH1780_REVMASK));
@@ -176,11 +221,14 @@ static int __devinit bh1780_probe(struct i2c_client *client,
ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
if (ret)
- goto err_op_failed;
+ goto put_regulator;
return 0;
-err_op_failed:
+put_regulator:
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+free_ddata:
kfree(ddata);
return ret;
}
@@ -196,50 +244,106 @@ static int __devexit bh1780_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int bh1780_suspend(struct device *dev)
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+static int bh1780_do_suspend(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
+ mutex_lock(&ddata->lock);
- ddata->power_state = state & BH1780_POWMASK;
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
- "CONTROL");
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, "CONTROL");
if (ret < 0)
- return ret;
+ goto unlock;
- return 0;
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
}
-static int bh1780_resume(struct device *dev)
+static int bh1780_do_resume(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = ddata->power_state;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
- "CONTROL");
+ mutex_lock(&ddata->lock);
+
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
+
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL,
+ ddata->power_state, "CONTROL");
+
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int bh1780_suspend(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_suspend(ddata);
if (ret < 0)
- return ret;
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
- return 0;
+ return ret;
+}
+
+static int bh1780_resume(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+
+ return ret;
}
+
static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
#define BH1780_PMOPS (&bh1780_pm)
+#endif /* CONFIG_PM */
#else
#define BH1780_PMOPS NULL
-#endif /* CONFIG_PM */
+static void bh1780_early_suspend(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
+}
+
+static void bh1780_late_resume(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+}
+#endif /*!CONFIG_HAS_EARLYSUSPEND */
static const struct i2c_device_id bh1780_id[] = {
{ "bh1780", 0 },
@@ -252,8 +356,10 @@ static struct i2c_driver bh1780_driver = {
.id_table = bh1780_id,
.driver = {
.name = "bh1780",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
.pm = BH1780_PMOPS,
-},
+#endif
+ },
};
static int __init bh1780_init(void)
diff --git a/drivers/misc/dispdev/Makefile b/drivers/misc/dispdev/Makefile
new file mode 100644
index 00000000000..11dc7611d26
--- /dev/null
+++ b/drivers/misc/dispdev/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DISPDEV) += dispdev.o
diff --git a/drivers/misc/dispdev/dispdev.c b/drivers/misc/dispdev/dispdev.c
new file mode 100644
index 00000000000..c504b69e80c
--- /dev/null
+++ b/drivers/misc/dispdev/dispdev.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display output device driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/ioctl.h>
+
+#include <linux/dispdev.h>
+#include <linux/hwmem.h>
+#include <video/mcde_dss.h>
+
+#define DENSITY_CHECK (16)
+#define MAX_BUFFERS 4
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+enum buffer_state {
+ BUF_UNUSED = 0,
+ BUF_QUEUED,
+ BUF_ACTIVATED,
+/*TODO:waitfordone BUF_DEACTIVATED,*/
+ BUF_FREE,
+ BUF_DEQUEUED,
+};
+
+struct dispdev_buffer {
+ struct hwmem_alloc *alloc;
+ u32 size;
+ enum buffer_state state;
+ u32 paddr; /* if pinned */
+};
+
+struct dispdev {
+ bool open;
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly;
+ struct mcde_overlay *parent_ovly;
+ struct dispdev_config config;
+ bool overlay;
+ struct dispdev_buffer buffers[MAX_BUFFERS];
+ wait_queue_head_t waitq_dq;
+ /*
+ * For the rotation use case
+ * buffers_need_update is used to ensure that a set_config that
+	 * changes width or height is followed by an unregister_buffer.
+ */
+ bool buffers_need_update;
+ /*
+ * For the overlay startup use case.
+ * first_update is used to handle the first update after a set_config.
+	 * In this case a queue_buffer will arrive after set_config and not
+	 * an unregister_buffer as in the rotation use case.
+ */
+ bool first_update;
+};
+
+static int find_buf(struct dispdev *dd, enum buffer_state state)
+{
+ int i;
+ for (i = 0; i < MAX_BUFFERS; i++)
+ if (dd->buffers[i].state == state)
+ return i;
+ return -1;
+}
+
+int dispdev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+
+ if (&dd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (dd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ dd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ ret = mcde_dss_enable_overlay(dd->ovly);
+ if (ret)
+ return ret;
+
+ file->private_data = dd;
+
+ return 0;
+}
+
+int dispdev_release(struct inode *inode, struct file *file)
+{
+ int i;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&dd->list == &dev_list)
+ return -ENODEV;
+
+ /* TODO: Make sure it waits for completion */
+ mcde_dss_disable_overlay(dd->ovly);
+ for (i = 0; i < MAX_BUFFERS; i++) {
+ if (dd->buffers[i].paddr)
+ hwmem_unpin(dd->buffers[i].alloc);
+ if (dd->buffers[i].alloc)
+ hwmem_release(dd->buffers[i].alloc);
+ dd->buffers[i].alloc = NULL;
+ dd->buffers[i].state = BUF_UNUSED;
+ dd->buffers[i].size = 0;
+ dd->buffers[i].paddr = 0;
+ }
+ dd->open = false;
+ wake_up(&dd->waitq_dq);
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case DISPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case DISPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case DISPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case DISPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case DISPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
+static void get_ovly_info(struct dispdev_config *cfg,
+ struct mcde_video_mode *vmode,
+ struct mcde_overlay_info *info, bool overlay)
+{
+ info->paddr = 0;
+ info->stride = cfg->stride;
+ info->fmt = get_ovly_fmt(cfg->format);
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = cfg->x;
+ info->dst_y = cfg->y;
+ info->dst_z = cfg->z;
+ info->w = cfg->width;
+ info->h = cfg->height;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = vmode->xres;
+ info->dirty.h = vmode->yres;
+}
+
+static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg)
+{
+ int ret = 0;
+ if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0)
+ return 0;
+
+ /*
+	 * Only update MCDE if the format, stride, width and height are the
+	 * same. Otherwise just store the new config and update MCDE on the
+	 * next queue_buffer. This is because the currently active buffer can
+	 * have the wrong format, width, etc.
+ */
+ if (cfg->format == dd->config.format &&
+ cfg->stride == dd->config.stride &&
+ cfg->width == dd->config.width &&
+ cfg->height == dd->config.height) {
+
+ int buf_index;
+ if (!dd->buffers_need_update) {
+ buf_index = find_buf(dd, BUF_ACTIVATED);
+ if (buf_index >= 0) {
+ struct mcde_overlay_info info;
+ struct dispdev_buffer *buf;
+ struct mcde_video_mode vmode;
+
+ buf = &dd->buffers[buf_index];
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(cfg, &vmode, &info, dd->overlay);
+ info.paddr = buf->paddr;
+ ret = mcde_dss_apply_overlay(dd->ovly, &info);
+ if (!ret)
+ mcde_dss_update_overlay(dd->ovly,
+ false);
+ }
+ }
+ } else {
+ dd->buffers_need_update = true;
+ }
+
+ dd->config = *cfg;
+
+ return ret;
+}
+
+static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name)
+{
+ int ret;
+ struct dispdev_buffer *buf;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+
+ ret = find_buf(dd, BUF_UNUSED);
+ if (ret < 0)
+ return -ENOMEM;
+ buf = &dd->buffers[ret];
+ buf->alloc = hwmem_resolve_by_name(hwmem_name);
+ if (IS_ERR(buf->alloc)) {
+ ret = PTR_ERR(buf->alloc);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buf->alloc, &buf->size, &memtype, &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ goto invalid_mem;
+ }
+
+ buf->state = BUF_FREE;
+ goto out;
+invalid_mem:
+ hwmem_release(buf->alloc);
+resolve_failed:
+out:
+ return ret;
+}
+
+static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx)
+{
+	struct dispdev_buffer *buf;
+
+	if (buf_idx >= ARRAY_SIZE(dd->buffers))
+		return -EINVAL;
+
+	buf = &dd->buffers[buf_idx];
+
+ if (buf->state == BUF_UNUSED)
+ return -EINVAL;
+
+ if (dd->buffers_need_update)
+ dd->buffers_need_update = false;
+
+ if (buf->state == BUF_ACTIVATED) {
+ /* Disable the overlay */
+ struct mcde_overlay_info info;
+ struct mcde_video_mode vmode;
+ /* TODO Wait for frame done */
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ hwmem_unpin(dd->buffers[buf_idx].alloc);
+ }
+
+ hwmem_release(buf->alloc);
+ buf->state = BUF_UNUSED;
+ buf->alloc = NULL;
+ buf->size = 0;
+ buf->paddr = 0;
+ dd->first_update = false;
+
+ return 0;
+}
+
+
+/**
+ * @brief Check if the buffer is transparent or black (ARGB = X000)
+ * Note: Only for ARGB32.
+ * Worst case: an almost fully transparent buffer
+ * Results: ~2200us @800MHz for a WVGA screen, with DENSITY_CHECK=8
+ *          ~520us @800MHz for a WVGA screen, with DENSITY_CHECK=16
+ *
+ * @param w width
+ * @param h height
+ * @param addr buffer addr
+ *
+ * @return 1 if the buffer is transparent, else 0
+ */
+static int is_transparent(int w, int h, u32 *addr)
+{
+ int i, j;
+ u32 *c, *next_line;
+ u32 sum;
+
+ next_line = addr;
+ sum = 0;
+
+ /* TODO Optimize me */
+ for (j = 0; j < h; j += DENSITY_CHECK) {
+ c = next_line;
+ for (i = 0; i < w; i += DENSITY_CHECK) {
+ sum += ((*c) & 0x00FFFFFF);
+ c += DENSITY_CHECK;
+ }
+ if (sum)
+ return 0; /* Not "transparent" */
+ next_line += (w * DENSITY_CHECK);
+ }
+
+ return 1; /* "Transparent" */
+}
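+
+/*
+ * Rough cost sketch for the sampling above (illustrative arithmetic only):
+ * for an 800x480 WVGA ARGB32 buffer the loop reads roughly
+ * (480 / DENSITY_CHECK) * (800 / DENSITY_CHECK) pixels, i.e. about
+ * 30 * 50 = 1500 of the 384000 words with DENSITY_CHECK = 16, and four
+ * times as many with DENSITY_CHECK = 8, which roughly matches the ratio of
+ * the two timings quoted in the header comment above.
+ */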
+
+static int dispdev_queue_buffer(struct dispdev *dd,
+ struct dispdev_buffer_info *buffer)
+{
+ int ret, i;
+ struct mcde_overlay_info info;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct hwmem_alloc *alloc;
+ struct mcde_video_mode vmode;
+ u32 buf_idx = buffer->buf_idx;
+
+ if (buf_idx >= ARRAY_SIZE(dd->buffers) ||
+ dd->buffers[buf_idx].state != BUF_DEQUEUED)
+ return -EINVAL;
+
+ alloc = dd->buffers[buf_idx].alloc;
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret);
+ return -EINVAL;
+ }
+
+ rgn.size = rgn.end = dd->buffers[buf_idx].size;
+ ret = hwmem_set_domain(alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(dd->mdev.this_device, "Set domain failed, %d\n", ret);
+
+ i = find_buf(dd, BUF_ACTIVATED);
+ if (i >= 0) {
+ dd->buffers[i].state = BUF_FREE;
+ wake_up(&dd->waitq_dq);
+ }
+
+ if (!dd->first_update) {
+ dd->first_update = true;
+ dd->buffers_need_update = false;
+ }
+
+ dd->buffers[buf_idx].paddr = mem_chunk.paddr;
+
+ if (buffer->display_update && !dd->buffers_need_update &&
+ dd->config.width == buffer->buf_cfg.width &&
+ dd->config.height == buffer->buf_cfg.height &&
+ dd->config.format == buffer->buf_cfg.format &&
+ dd->config.stride == buffer->buf_cfg.stride) {
+ info.paddr = mem_chunk.paddr;
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ } else if (buffer->display_update) {
+ dd->buffers_need_update = true;
+ }
+
+ /* Disable the MCDE FB overlay */
+ if ((dd->parent_ovly->state != NULL) &&
+ (dd->ddev->check_transparency)) {
+ dd->ddev->check_transparency--;
+ mcde_dss_get_overlay_info(dd->parent_ovly, &info);
+ if (dd->ddev->check_transparency == 0) {
+ if (is_transparent(info.w, info.h, info.vaddr)) {
+ mcde_dss_disable_overlay(dd->parent_ovly);
+ printk(KERN_INFO "%s Disable overlay\n",
+ __func__);
+ }
+ }
+ }
+
+ dd->buffers[buf_idx].state = BUF_ACTIVATED;
+
+ return 0;
+}
+
+static int dispdev_dequeue_buffer(struct dispdev *dd)
+{
+ int i;
+
+ i = find_buf(dd, BUF_FREE);
+ if (i < 0) {
+ if (find_buf(dd, BUF_ACTIVATED) < 0)
+ return -EINVAL;
+ mutex_unlock(&dd->lock);
+ wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0);
+ mutex_lock(&dd->lock);
+ }
+ hwmem_unpin(dd->buffers[i].alloc);
+ dd->buffers[i].state = BUF_DEQUEUED;
+ dd->buffers[i].paddr = 0;
+
+ return i;
+}
+
+long dispdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct dispdev *dd = (struct dispdev *)file->private_data;
+
+ mutex_lock(&dd->lock);
+
+ switch (cmd) {
+ case DISPDEV_SET_CONFIG_IOC:
+ {
+ struct dispdev_config cfg;
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(cfg)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_set_config(dd, &cfg);
+ }
+ break;
+ case DISPDEV_GET_CONFIG_IOC:
+ ret = copy_to_user((void __user *)arg, &dd->config,
+ sizeof(dd->config));
+ if (ret)
+ ret = -EFAULT;
+ break;
+ case DISPDEV_REGISTER_BUFFER_IOC:
+ ret = dispdev_register_buffer(dd, (s32)arg);
+ break;
+ case DISPDEV_UNREGISTER_BUFFER_IOC:
+ ret = dispdev_unregister_buffer(dd, (u32)arg);
+ break;
+ case DISPDEV_QUEUE_BUFFER_IOC:
+ {
+ struct dispdev_buffer_info buffer;
+ if (copy_from_user(&buffer, (void __user *)arg,
+ sizeof(buffer)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_queue_buffer(dd, &buffer);
+ break;
+ }
+ case DISPDEV_DEQUEUE_BUFFER_IOC:
+ ret = dispdev_dequeue_buffer(dd);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&dd->lock);
+
+ return ret;
+}
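+
+/*
+ * A minimal user-space sketch of the flow served by the ioctls above. The
+ * device node name, the hwmem buffer name (a global name, e.g. obtained via
+ * HWMEM_EXPORT_IOC) and the omitted includes/error handling are assumptions
+ * for illustration; buf_cfg is assumed to mirror struct dispdev_config.
+ *
+ *	int fd = open("/dev/dispdev0", O_RDWR);
+ *	struct dispdev_config cfg;
+ *	struct dispdev_buffer_info buf;
+ *	int idx;
+ *
+ *	ioctl(fd, DISPDEV_GET_CONFIG_IOC, &cfg);
+ *	ioctl(fd, DISPDEV_REGISTER_BUFFER_IOC, hwmem_name);
+ *	idx = ioctl(fd, DISPDEV_DEQUEUE_BUFFER_IOC);
+ *	... render into the hwmem buffer ...
+ *	buf.buf_idx = idx;
+ *	buf.display_update = 1;
+ *	buf.buf_cfg = cfg;
+ *	ioctl(fd, DISPDEV_QUEUE_BUFFER_IOC, &buf);
+ */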
+
+static const struct file_operations dispdev_fops = {
+ .open = dispdev_open,
+ .release = dispdev_release,
+ .unlocked_ioctl = dispdev_ioctl,
+};
+
+static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev,
+ const char *name, bool overlay)
+{
+ u16 w, h;
+ int rotation;
+
+ mutex_init(&dd->lock);
+ INIT_LIST_HEAD(&dd->list);
+ dd->ddev = ddev;
+ dd->overlay = overlay;
+ mcde_dss_get_native_resolution(ddev, &w, &h);
+ rotation = mcde_dss_get_rotation(ddev);
+
+ if ((rotation == MCDE_DISPLAY_ROT_90_CCW) ||
+ (rotation == MCDE_DISPLAY_ROT_90_CW)) {
+ dd->config.width = h;
+ dd->config.height = w;
+ } else {
+ dd->config.width = w;
+ dd->config.height = h;
+ }
+ dd->config.format = DISPDEV_FMT_RGB565;
+ dd->config.stride = sizeof(u16) * w;
+ dd->config.x = 0;
+ dd->config.y = 0;
+ dd->config.z = 0;
+ dd->buffers_need_update = false;
+ dd->first_update = false;
+ init_waitqueue_head(&dd->waitq_dq);
+ dd->mdev.minor = MISC_DYNAMIC_MINOR;
+ dd->mdev.name = name;
+ dd->mdev.fops = &dispdev_fops;
+ pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name,
+ dd->config.width, dd->config.height, dd->config.format,
+ dd->config.stride);
+}
+
+int dispdev_create(struct mcde_display_device *ddev, bool overlay,
+ struct mcde_overlay *parent_ovly)
+{
+ int ret = 0;
+ struct dispdev *dd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info = {0};
+
+ static int counter;
+	/* Static so the name stays valid for mdev.name after we return. */
+	static char name[16];
+
+ dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+	snprintf(name, sizeof(name), "%s%d", DISPDEV_DEFAULT_DEVICE_PREFIX,
+		 counter++);
+ init_dispdev(dd, ddev, name, overlay);
+
+ if (!overlay) {
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto fail_enable_display;
+ mcde_dss_get_video_mode(ddev, &vmode);
+ mcde_dss_try_video_mode(ddev, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ goto fail_set_video_mode;
+ mcde_dss_set_pixel_format(ddev, info.fmt);
+ mcde_dss_apply_channel(ddev);
+ } else
+ mcde_dss_get_video_mode(ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, overlay);
+
+ /* Save the MCDE FB overlay */
+ dd->parent_ovly = parent_ovly;
+
+ dd->ovly = mcde_dss_create_overlay(ddev, &info);
+ if (!dd->ovly) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ ret = misc_register(&dd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&dd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+ mcde_dss_destroy_overlay(dd->ovly);
+fail_create_ovly:
+ if (!overlay)
+ mcde_dss_disable_display(ddev);
+fail_set_video_mode:
+fail_enable_display:
+ kfree(dd);
+out:
+ return ret;
+}
+
+void dispdev_destroy(struct mcde_display_device *ddev)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ if (dd->ddev == ddev) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ break;
+ }
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void dispdev_destroy_all(void)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init dispdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(dispdev_init);
+
+static void __exit dispdev_exit(void)
+{
+ dispdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(dispdev_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display output device driver");
+
diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile
new file mode 100644
index 00000000000..c307616a181
--- /dev/null
+++ b/drivers/misc/hwmem/Makefile
@@ -0,0 +1,3 @@
+hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o
+
+obj-$(CONFIG_HWMEM) += hwmem.o
diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c
new file mode 100644
index 00000000000..e0ab4ee6cf8
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/dcache.h>
+
+#include "cache_handler.h"
+
+#define U32_MAX (~(u32)0)
+
+enum hwmem_alloc_flags cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings);
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+ pgprot_t *pgprot);
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region);
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region);
+
+static void invalidate_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void clean_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void flush_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+
+static void null_range(struct cach_range *range);
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add);
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range);
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove);
+static bool is_non_empty_range(struct cach_range *range);
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection);
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment);
+static u32 range_length(struct cach_range *range);
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range);
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset);
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset);
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment);
+static u32 align_down(u32 value, u32 alignment);
+
+/*
+ * Exported functions
+ */
+
+void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings,
+ u32 size)
+{
+ buf->vstart = NULL;
+ buf->pstart = 0;
+ buf->size = size;
+
+ buf->cache_settings = cachi_get_cache_settings(cache_settings);
+}
+
+void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr)
+{
+ bool tmp;
+
+ buf->vstart = vaddr;
+ buf->pstart = paddr;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ /*
+ * Keep whatever is in the cache. This way we avoid an
+		 * unnecessary sync if the CPU is the first user.
+ */
+ buf->range_in_cpu_cache.start = 0;
+ buf->range_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_in_cpu_cache,
+ get_dcache_granularity());
+ buf->range_dirty_in_cpu_cache.start = 0;
+ buf->range_dirty_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_dirty_in_cpu_cache,
+ get_dcache_granularity());
+ } else {
+ flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false,
+ &tmp);
+ drain_cpu_write_buf();
+
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ }
+ null_range(&buf->range_invalid_in_cpu_cache);
+}
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot)
+{
+ cachi_set_pgprot_cache_options(buf->cache_settings, pgprot);
+}
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ struct hwmem_region *__region;
+ struct hwmem_region full_region;
+
+ if (region != NULL) {
+ __region = region;
+ } else {
+ full_region.offset = 0;
+ full_region.count = 1;
+ full_region.start = 0;
+ full_region.end = buf->size;
+ full_region.size = buf->size;
+
+ __region = &full_region;
+ }
+
+ switch (domain) {
+ case HWMEM_DOMAIN_SYNC:
+ sync_buf_post_cpu(buf, access, __region);
+
+ break;
+
+ case HWMEM_DOMAIN_CPU:
+ sync_buf_pre_cpu(buf, access, __region);
+
+ break;
+ }
+}
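+
+/*
+ * A sketch of the intended call order for a buffer that the CPU fills and
+ * some other hardware then reads. The access flags passed with
+ * HWMEM_DOMAIN_SYNC describe the upcoming access in the target domain
+ * (mirroring how dispdev_queue_buffer() calls hwmem_set_domain() with
+ * HWMEM_ACCESS_READ before the display reads); 'buf' and 'rgn' are assumed
+ * to be set up by the caller, and a NULL region means the whole buffer:
+ *
+ *	cach_set_domain(buf, HWMEM_ACCESS_WRITE, HWMEM_DOMAIN_CPU, &rgn);
+ *	... CPU writes the region ...
+ *	cach_set_domain(buf, HWMEM_ACCESS_READ, HWMEM_DOMAIN_SYNC, &rgn);
+ *	... hardware reads the region ...
+ */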
+
+/*
+ * Local functions
+ */
+
+enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings)
+{
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+ /* We don't know the cache setting so we assume worst case. */
+ static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+
+ if (requested_cache_settings & CACHE_ON_FLAGS_MASK)
+ return CACHE_SETTING;
+ else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE ||
+ (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED &&
+ !(requested_cache_settings &
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE)))
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+ else if (requested_cache_settings &
+ (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED))
+ return 0;
+ else
+ /* Nothing specified, use cached */
+ return CACHE_SETTING;
+}
+
+void __attribute__((weak)) cachi_set_pgprot_cache_options(
+ enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot)
+{
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED)
+ *pgprot = *pgprot; /* To silence compiler and checkpatch */
+ else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE)
+ *pgprot = pgprot_writecombine(*pgprot);
+ else
+ *pgprot = pgprot_noncached(*pgprot);
+}
+
+bool __attribute__((weak)) speculative_data_prefetch(void)
+{
+ /* We don't know so we go with the safe alternative */
+ return true;
+}
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region)
+{
+ bool write = access & HWMEM_ACCESS_WRITE;
+ bool read = access & HWMEM_ACCESS_READ;
+
+ if (!write && !read)
+ return;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ struct cach_range region_range;
+
+ region_2_range(region, buf->size, &region_range);
+
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_WB))
+			/* Perform deferred invalidates */
+ invalidate_cpu_cache(buf, &region_range);
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_AOW))
+ expand_range(&buf->range_in_cpu_cache, &region_range);
+ if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) {
+ struct cach_range dirty_range_addition;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ dirty_range_addition = region_range;
+ else
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &dirty_range_addition);
+
+ expand_range(&buf->range_dirty_in_cpu_cache,
+ &dirty_range_addition);
+ }
+ }
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) {
+ if (write)
+ buf->in_cpu_write_buf = true;
+ }
+}
+
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region)
+{
+ bool write = next_access & HWMEM_ACCESS_WRITE;
+ bool read = next_access & HWMEM_ACCESS_READ;
+ struct cach_range region_range;
+
+ if (!write && !read)
+ return;
+
+ region_2_range(next_region, buf->size, &region_range);
+
+ if (write) {
+ if (speculative_data_prefetch()) {
+ /* Defer invalidate */
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &intersection);
+
+ expand_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+
+ clean_cpu_cache(buf, &region_range);
+ } else {
+ flush_cpu_cache(buf, &region_range);
+ }
+ }
+ if (read)
+ clean_cpu_cache(buf, &region_range);
+
+ if (buf->in_cpu_write_buf) {
+ drain_cpu_write_buf();
+
+ buf->in_cpu_write_buf = false;
+ }
+}
+
+static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_invalid_in_cpu_cache, range,
+ &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_invalid_in_cpu_cache);
+
+ /*
+		 * The cache handler never uses invalidate to discard data in
+		 * the cache, so we can use flush instead, which is
+		 * considerably faster for large buffers.
+ */
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ null_range(&buf->range_invalid_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ } else {
+ /*
+ * No need to shrink range_in_cpu_cache as invalidate
+ * is only used when we can't keep track of what's in
+ * the CPU cache.
+ */
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool cleaned_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_dirty_in_cpu_cache);
+
+ clean_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &cleaned_everything);
+
+ if (cleaned_everything)
+ null_range(&buf->range_dirty_in_cpu_cache);
+ else
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ }
+}
+
+static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection, &buf->range_in_cpu_cache);
+
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ if (!speculative_data_prefetch())
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ null_range(&buf->range_invalid_in_cpu_cache);
+ } else {
+ if (!speculative_data_prefetch())
+ shrink_range(&buf->range_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void null_range(struct cach_range *range)
+{
+ range->start = U32_MAX;
+ range->end = 0;
+}
+
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add)
+{
+ range->start = min(range->start, range_2_add->start);
+ range->end = max(range->end, range_2_add->end);
+}
+
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range)
+{
+ u32 space_on_low_side = range->start - enclosing_range->start;
+ u32 space_on_high_side = enclosing_range->end - range->end;
+
+ if (space_on_low_side < space_on_high_side)
+ range->start = enclosing_range->start;
+ else
+ range->end = enclosing_range->end;
+}
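+
+/*
+ * Worked example (illustrative numbers): with a tracked dirty range of
+ * [0, 100) and an operation on [30, 40), the intersection [30, 40) is
+ * expanded towards the nearer edge, giving [0, 40). After the cache
+ * operation, shrink_range() removes [0, 40) and leaves [40, 100), so the
+ * remaining dirty area is still a single contiguous range, which is all
+ * that struct cach_range can represent.
+ */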
+
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove)
+{
+ if (range_2_remove->start > range->start)
+ range->end = min(range->end, range_2_remove->start);
+ else
+ range->start = max(range->start, range_2_remove->end);
+
+ if (range->start >= range->end)
+ null_range(range);
+}
+
+static bool is_non_empty_range(struct cach_range *range)
+{
+ return range->end > range->start;
+}
+
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection)
+{
+ intersection->start = max(range_1->start, range_2->start);
+ intersection->end = min(range_1->end, range_2->end);
+
+ if (intersection->start >= intersection->end)
+ null_range(intersection);
+}
+
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment)
+{
+ if (!is_non_empty_range(range))
+ return;
+
+ range->start = align_down(range->start, alignment);
+ range->end = align_up(range->end, alignment);
+}
+
+static u32 range_length(struct cach_range *range)
+{
+ if (is_non_empty_range(range))
+ return range->end - range->start;
+ else
+ return 0;
+}
+
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range)
+{
+ /*
+	 * We don't care about invalid regions; instead we limit the region's
+	 * range to the buffer's range. This should work well enough: worst
+	 * case we sync the entire buffer when we get an invalid region, which
+	 * is acceptable.
+ */
+ range->start = region->offset + region->start;
+ range->end = min(region->offset + (region->count * region->size) -
+ (region->size - region->end), buffer_size);
+ if (range->start >= range->end) {
+ null_range(range);
+ return;
+ }
+
+ align_range_up(range, get_dcache_granularity());
+}
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset)
+{
+ return (void *)((u32)buf->vstart + offset);
+}
+
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset)
+{
+ return buf->pstart + offset;
+}
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ u32 value_2_add;
+
+ if (remainder == 0)
+ return value;
+
+ value_2_add = alignment - remainder;
+
+ if (value_2_add > U32_MAX - value) /* Will overflow */
+ return U32_MAX;
+
+ return value + value_2_add;
+}
+
+static u32 align_down(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ if (remainder == 0)
+ return value;
+
+ return value - remainder;
+}
diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h
new file mode 100644
index 00000000000..792105196fa
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * The cache handler cannot handle simultaneous execution! The caller has to
+ * ensure such a situation does not occur.
+ */
+
+#ifndef _CACHE_HANDLER_H_
+#define _CACHE_HANDLER_H_
+
+#include <linux/types.h>
+#include <linux/hwmem.h>
+
+/*
+ * To avoid duplicating all datatypes we've used the hwmem datatypes. If
+ * someone wants to use the cache handler but not hwmem, we'll have to define
+ * our own datatypes.
+ */
+
+struct cach_range {
+ u32 start; /* Inclusive */
+ u32 end; /* Exclusive */
+};
+
+/*
+ * Internal, do not touch!
+ */
+struct cach_buf {
+ void *vstart;
+ u32 pstart;
+ u32 size;
+
+ /* Remaining hints are active */
+ enum hwmem_alloc_flags cache_settings;
+
+ bool in_cpu_write_buf;
+ struct cach_range range_in_cpu_cache;
+ struct cach_range range_dirty_in_cpu_cache;
+ struct cach_range range_invalid_in_cpu_cache;
+};
+
+void cach_init_buf(struct cach_buf *buf,
+ enum hwmem_alloc_flags cache_settings, u32 size);
+
+void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr);
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot);
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
+#endif /* _CACHE_HANDLER_H_ */
diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c
new file mode 100644
index 00000000000..31533ed5988
--- /dev/null
+++ b/drivers/misc/hwmem/contig_alloc.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Contiguous memory allocator
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <asm/sizes.h>
+
+#define MAX_INSTANCE_NAME_LENGTH 31
+
+struct alloc {
+ struct list_head list;
+
+ bool in_use;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+struct instance {
+ struct list_head list;
+
+ char name[MAX_INSTANCE_NAME_LENGTH + 1];
+
+ phys_addr_t region_paddr;
+ void *region_kaddr;
+ size_t region_size;
+
+ struct list_head alloc_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct inode *debugfs_inode;
+ int cona_status_free;
+ int cona_status_used;
+ int cona_status_max_cont;
+ int cona_status_max_check;
+ int cona_status_biggest_free;
+ int cona_status_printed;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static LIST_HEAD(instance_list);
+
+static DEFINE_MUTEX(lock);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+static int init_alloc_list(struct instance *instance);
+static void clean_alloc_list(struct instance *instance);
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size);
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size);
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size)
+{
+ int ret;
+ struct instance *instance;
+ struct vm_struct *vm_area;
+
+ if (region_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (instance == NULL)
+ return ERR_PTR(-ENOMEM);
+
+	strncpy(instance->name, name, MAX_INSTANCE_NAME_LENGTH);
+	/* Always NUL-terminate, truncating the name if necessary */
+	instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0';
+ instance->region_paddr = region_paddr;
+ instance->region_size = region_size;
+
+ vm_area = get_vm_area(region_size, VM_IOREMAP);
+ if (vm_area == NULL) {
+ printk(KERN_WARNING "CONA: Failed to allocate %u bytes"
+ " kernel virtual memory", region_size);
+ ret = -ENOMSG;
+ goto vmem_alloc_failed;
+ }
+ instance->region_kaddr = vm_area->addr;
+
+ INIT_LIST_HEAD(&instance->alloc_list);
+ ret = init_alloc_list(instance);
+ if (ret < 0)
+ goto init_alloc_list_failed;
+
+ mutex_lock(&lock);
+ list_add_tail(&instance->list, &instance_list);
+ mutex_unlock(&lock);
+
+ return instance;
+
+init_alloc_list_failed:
+ vm_area = remove_vm_area(instance->region_kaddr);
+ if (vm_area == NULL)
+ printk(KERN_ERR "CONA: Failed to free kernel virtual memory,"
+ " resource leak!\n");
+
+ kfree(vm_area);
+vmem_alloc_failed:
+ kfree(instance);
+
+ return ERR_PTR(ret);
+}
+
+void *cona_alloc(void *instance, size_t size)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc;
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ alloc = find_free_alloc_bestfit(instance_l, size);
+ if (IS_ERR(alloc))
+ goto out;
+ if (size < alloc->size) {
+ alloc = split_allocation(alloc, size);
+ if (IS_ERR(alloc))
+ goto out;
+ } else {
+ alloc->in_use = true;
+ }
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont += alloc->size;
+ instance_l->cona_status_max_check =
+ max(instance_l->cona_status_max_check,
+ instance_l->cona_status_max_cont);
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+
+void cona_free(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc_l = (struct alloc *)alloc;
+ struct alloc *other;
+
+ mutex_lock(&lock);
+
+ alloc_l->in_use = false;
+
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont -= alloc_l->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ other = list_entry(alloc_l->list.prev, struct alloc, list);
+ if ((alloc_l->list.prev != &instance_l->alloc_list) &&
+ !other->in_use) {
+ other->size += alloc_l->size;
+ list_del(&alloc_l->list);
+ kfree(alloc_l);
+ alloc_l = other;
+ }
+ other = list_entry(alloc_l->list.next, struct alloc, list);
+ if ((alloc_l->list.next != &instance_l->alloc_list) &&
+ !other->in_use) {
+ alloc_l->size += other->size;
+ list_del(&other->list);
+ kfree(other);
+ }
+
+ mutex_unlock(&lock);
+}
+
+phys_addr_t cona_get_alloc_paddr(void *alloc)
+{
+ return ((struct alloc *)alloc)->paddr;
+}
+
+void *cona_get_alloc_kaddr(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+
+ return instance_l->region_kaddr + get_alloc_offset(instance_l,
+ (struct alloc *)alloc);
+}
+
+size_t cona_get_alloc_size(void *alloc)
+{
+ return ((struct alloc *)alloc)->size;
+}
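+
+/*
+ * A minimal usage sketch of the allocator API above (the name, physical
+ * address and sizes are illustrative; error handling is trimmed and the
+ * kernel address still needs a mapping before it is dereferenced):
+ *
+ *	void *instance = cona_create("mem", 0x20000000, SZ_16M);
+ *	void *alloc = cona_alloc(instance, SZ_1M);
+ *
+ *	if (!IS_ERR(alloc)) {
+ *		phys_addr_t paddr = cona_get_alloc_paddr(alloc);
+ *		void *kaddr = cona_get_alloc_kaddr(instance, alloc);
+ *		size_t size = cona_get_alloc_size(alloc);
+ *		... map pages at kaddr, hand paddr to hardware ...
+ *		cona_free(instance, alloc);
+ *	}
+ */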
+
+static int init_alloc_list(struct instance *instance)
+{
+ /*
+ * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't
+ * handle that.
+ */
+ int ret;
+ u32 curr_pos = instance->region_paddr;
+ u32 region_end = instance->region_paddr + instance->region_size;
+ u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1);
+ struct alloc *alloc;
+
+ if (PAGE_SIZE >= SZ_64M) {
+ printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n");
+ return -ENOMSG;
+ }
+
+ while (next_64mib_boundary < region_end) {
+ if (next_64mib_boundary - curr_pos > PAGE_SIZE) {
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = next_64mib_boundary - curr_pos -
+ PAGE_SIZE;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = PAGE_SIZE;
+ alloc->in_use = true;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+
+#ifdef CONFIG_DEBUG_FS
+ instance->cona_status_max_cont += alloc->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ next_64mib_boundary += SZ_64M;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = region_end - curr_pos;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+
+ return 0;
+
+error:
+ clean_alloc_list(instance);
+
+ return ret;
+}
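+
+/*
+ * Worked example (assuming 4 KiB pages): for a region starting at 60 MiB
+ * with a size of 16 MiB, the initial list becomes a free block
+ * [60 MiB, 64 MiB - 4 KiB), a reserved in-use page ending exactly at the
+ * 64 MiB boundary, and a free block [64 MiB, 76 MiB). The reserved page is
+ * never handed out or freed, so no free block, and therefore no later
+ * allocation, can ever straddle the boundary.
+ */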
+
+static void clean_alloc_list(struct instance *instance)
+{
+ while (list_empty(&instance->alloc_list) == 0) {
+ struct alloc *i = list_first_entry(&instance->alloc_list,
+ struct alloc, list);
+
+ list_del(&i->list);
+
+ kfree(i);
+ }
+}
+
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size)
+{
+ size_t best_diff = ~(size_t)0;
+ struct alloc *alloc = NULL, *i;
+
+ list_for_each_entry(i, &instance->alloc_list, list) {
+ size_t diff = i->size - size;
+ if (i->in_use || i->size < size)
+ continue;
+ if (diff < best_diff) {
+ alloc = i;
+ best_diff = diff;
+ }
+ }
+
+ return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
+}
+
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size)
+{
+ struct alloc *new_alloc;
+
+ new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (new_alloc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ new_alloc->in_use = true;
+ new_alloc->paddr = alloc->paddr;
+ new_alloc->size = new_alloc_size;
+ alloc->size -= new_alloc_size;
+ alloc->paddr += new_alloc_size;
+
+ list_add_tail(&new_alloc->list, &alloc->list);
+
+ return new_alloc;
+}
+
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc)
+{
+ return alloc->paddr - instance->region_paddr;
+}
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size);
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size);
+static struct instance *get_instance_from_file(struct file *file);
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ if (i == 1) {
+ if (alloc->in_use)
+ instance->cona_status_used += alloc->size;
+ else
+ instance->cona_status_free += alloc->size;
+ }
+
+ if (!alloc->in_use) {
+ instance->cona_status_biggest_free =
+ max((size_t)alloc->size,
+ (size_t)instance->cona_status_biggest_free);
+ }
+
+ ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t"
+ "in use: %1u\t used: %10u (%dMB)"
+ " \t free: %10u (%dMB)\n",
+ alloc->paddr,
+ alloc->size,
+ alloc->in_use,
+ instance->cona_status_used,
+ instance->cona_status_used/1024/1024,
+ instance->cona_status_free,
+ instance->cona_status_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l, "Overall peak usage:\t%10u "
+ "(%dMB)\nCurrent max usage:\t%10u (%dMB)\n"
+ "Current biggest free:\t%10d (%dMB)\n",
+ instance->cona_status_max_check,
+ instance->cona_status_max_check/1024/1024,
+ instance->cona_status_max_cont,
+ instance->cona_status_max_cont/1024/1024,
+ instance->cona_status_biggest_free,
+ instance->cona_status_biggest_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static struct instance *get_instance_from_file(struct file *file)
+{
+ struct instance *curr_instance;
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ if (file->f_dentry->d_inode == curr_instance->debugfs_inode)
+ return curr_instance;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /*
+	 * We assume the supplied buffer and PAGE_SIZE are large enough to
+	 * hold information about at least one alloc; if not, no data will be
+	 * returned.
+ */
+
+ int ret;
+ struct instance *instance;
+ struct alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data is initialized to NULL in open; I assume that is 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+ bool readout_aborted = false;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+ instance = get_instance_from_file(file);
+ if (IS_ERR(instance)) {
+ ret = PTR_ERR(instance);
+ goto out;
+ }
+
+ list_for_each_entry(curr_alloc, &instance->alloc_list, list) {
+ phys_addr_t alloc_offset = get_alloc_offset(instance,
+ curr_alloc);
+ if (alloc_offset < (phys_addr_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(instance, curr_alloc, &local_buf_pos,
+ available_space - (size_t)(local_buf_pos -
+ local_buf));
+
+ if (ret == -EINVAL) { /* No more room */
+ readout_aborted = true;
+ break;
+ } else if (ret < 0) {
+ goto out;
+ }
+ /*
+ * There could be an overflow issue here in the unlikely case
+ * where the region is placed at the end of the address range
+		 * and the last alloc is 1 byte large. Since this is debug code
+		 * and that case will most likely never happen, I've chosen to
+		 * defer fixing it until it happens.
+ */
+ *curr_pos = (void *)(alloc_offset + 1);
+
+ /* Make sure to also print status if there were any prints */
+ instance->cona_status_printed = false;
+ }
+
+ if (!readout_aborted && !instance->cona_status_printed) {
+ ret = print_alloc_status(instance, &local_buf_pos,
+ available_space -
+ (size_t)(local_buf_pos - local_buf));
+
+ if (ret == -EINVAL) /* No more room */
+ readout_aborted = true;
+ else if (ret < 0)
+ goto out;
+ else
+ instance->cona_status_printed = true;
+ }
+
+ if (!readout_aborted) {
+ instance->cona_status_free = 0;
+ instance->cona_status_used = 0;
+ instance->cona_status_biggest_free = 0;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	if (copy_to_user(buf, local_buf, bytes_read)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static int __init init_debugfs(void)
+{
+ struct instance *curr_instance;
+ struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL);
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ struct dentry *file_dentry;
+ char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1];
+ tmp_str[0] = '\0';
+ strcat(tmp_str, curr_instance->name);
+ strcat(tmp_str, "_allocs");
+ file_dentry = debugfs_create_file(tmp_str, 0444,
+ debugfs_root_dir, 0, &debugfs_allocs_fops);
+ if (file_dentry != NULL)
+ curr_instance->debugfs_inode = file_dentry->d_inode;
+ }
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+/*
+ * Must be executed after all instances have been created, hence the
+ * late_initcall.
+ */
+late_initcall(init_debugfs);
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c
new file mode 100644
index 00000000000..e9e50de78bd
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-ioctl.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mm_types.h>
+#include <linux/hwmem.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+static int hwmem_open(struct inode *inode, struct file *file);
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma);
+static int hwmem_release_fop(struct inode *inode, struct file *file);
+static long hwmem_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+static const struct file_operations hwmem_fops = {
+ .open = hwmem_open,
+ .mmap = hwmem_ioctl_mmap,
+ .unlocked_ioctl = hwmem_ioctl,
+ .release = hwmem_release_fop,
+ .get_unmapped_area = hwmem_get_unmapped_area,
+};
+
+static struct miscdevice hwmem_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hwmem",
+ .fops = &hwmem_fops,
+};
+
+struct hwmem_file {
+ struct mutex lock;
+ struct idr idr; /* id -> struct hwmem_alloc*, ref counted */
+ struct hwmem_alloc *fd_alloc; /* Ref counted */
+};
+
+static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
+{
+ int id, ret;
+
+ while (true) {
+ if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ return -ENOMEM;
+ }
+
+ /*
+	 * The IDR always returns the lowest free id, so there is no wrapping
+	 * issue here.
+ */
+ if (id >= (s32)1 << (31 - PAGE_SHIFT)) {
+ dev_err(hwmem_device.this_device, "Out of IDs!\n");
+ idr_remove(&hwfile->idr, id);
+ return -ENOMSG;
+ }
+
+ return (s32)id << PAGE_SHIFT;
+}
+
+static void remove_id(struct hwmem_file *hwfile, s32 id)
+{
+ idr_remove(&hwfile->idr, id >> PAGE_SHIFT);
+}
+
+static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) :
+ hwfile->fd_alloc;
+ if (alloc == NULL)
+ alloc = ERR_PTR(-EINVAL);
+
+ return alloc;
+}
+
+static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+}
+
+static int release(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ if (id == 0)
+ return -EINVAL;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ remove_id(hwfile, id);
+ hwmem_release(alloc);
+
+ return 0;
+}
+
+static int set_cpu_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU,
+ (struct hwmem_region *)&req->region);
+}
+
+static int set_sync_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC,
+ (struct hwmem_region *)&req->region);
+}
+
+static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, NULL, &mem_type, NULL);
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS)
+ return -EINVAL;
+
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret < 0)
+ return ret;
+
+ req->phys_addr = mem_chunk.paddr;
+
+ return 0;
+}
+
+static int unpin(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_unpin(alloc);
+
+ return 0;
+}
+
+static int set_access(struct hwmem_file *hwfile,
+ struct hwmem_set_access_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_access(alloc, req->access, req->pid);
+}
+
+static int get_info(struct hwmem_file *hwfile,
+ struct hwmem_get_info_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access);
+
+ return 0;
+}
+
+static s32 export(struct hwmem_file *hwfile, s32 id)
+{
+ s32 ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /*
+	 * The user could be about to send the buffer to a driver, but
+	 * there is a chance the current thread group doesn't have import
+	 * rights if it gained access to the buffer via an inter-process fd
+	 * transfer (fork, Android binder); in that case the driver will not
+	 * be able to resolve the buffer name. To avoid this situation we give
+	 * the current thread group import rights. This does not breach
+	 * security, as the process already has access to the buffer
+	 * (otherwise it would not be able to get here).
+ */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+
+ ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT),
+ task_tgid_nr(current));
+ if (ret < 0)
+ return ret;
+
+ return hwmem_get_name(alloc);
+}
+
+static s32 import(struct hwmem_file *hwfile, s32 name)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ goto error;
+
+ return ret;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int import_fd(struct hwmem_file *hwfile, s32 name)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
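+
+/*
+ * Cross-process sharing sketch using the ioctls backed by export(), import()
+ * and import_fd() above. The fds, the IPC transport and the access setup are
+ * assumptions; the importer must hold HWMEM_ACCESS_IMPORT, e.g. granted via
+ * HWMEM_SET_ACCESS_IOC or the buffer's default access.
+ *
+ *	In process A:
+ *		s32 name = ioctl(fd_a, HWMEM_EXPORT_IOC, id);
+ *		... hand 'name' to process B over any IPC channel ...
+ *
+ *	In process B:
+ *		s32 id_b = ioctl(fd_b, HWMEM_IMPORT_IOC, name);
+ */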
+
+static int hwmem_open(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile;
+
+ hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL);
+ if (hwfile == NULL)
+ return -ENOMEM;
+
+ idr_init(&hwfile->idr);
+ mutex_init(&hwfile->lock);
+ file->private_data = hwfile;
+
+ return 0;
+}
+
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&hwfile->lock);
+
+ alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT);
+ if (IS_ERR(alloc)) {
+ ret = PTR_ERR(alloc);
+ goto out;
+ }
+
+ ret = hwmem_mmap(alloc, vma);
+
+out:
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data)
+{
+ hwmem_release((struct hwmem_alloc *)ptr);
+
+ return 0;
+}
+
+static int hwmem_release_fop(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL);
+ idr_remove_all(&hwfile->idr);
+ idr_destroy(&hwfile->idr);
+
+ if (hwfile->fd_alloc)
+ hwmem_release(hwfile->fd_alloc);
+
+ mutex_destroy(&hwfile->lock);
+
+ kfree(hwfile);
+
+ return 0;
+}
+
+static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOSYS;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ mutex_lock(&hwfile->lock);
+
+ switch (cmd) {
+ case HWMEM_ALLOC_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc(hwfile, &req);
+ }
+ break;
+ case HWMEM_ALLOC_FD_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc_fd(hwfile, &req);
+ }
+ break;
+ case HWMEM_RELEASE_IOC:
+ ret = release(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_CPU_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_cpu_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_SET_SYNC_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_sync_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_PIN_IOC:
+ {
+ struct hwmem_pin_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ else
+ ret = pin(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_UNPIN_IOC:
+ ret = unpin(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_ACCESS_IOC:
+ {
+ struct hwmem_set_access_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_access_request)))
+ ret = -EFAULT;
+ else
+ ret = set_access(hwfile, &req);
+ }
+ break;
+ case HWMEM_GET_INFO_IOC:
+ {
+ struct hwmem_get_info_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ else
+ ret = get_info(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_EXPORT_IOC:
+ ret = export(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_IOC:
+ ret = import(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_FD_IOC:
+ ret = import_fd(hwfile, (s32)arg);
+ break;
+ }
+
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ /*
+	 * pgoff will not be valid as it contains a buffer id (right-shifted
+	 * by PAGE_SHIFT bits). To avoid confusing get_unmapped_area we don't
+	 * pass on file or pgoff.
+ */
+ return current->mm->get_unmapped_area(NULL, addr, len, 0, flags);
+}
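+
+/*
+ * Hypothetical userspace sketch of the mmap convention used above: the file
+ * offset carries the buffer id (hwmem_ioctl_mmap recovers it from vm_pgoff),
+ * so a buffer is mapped by passing its id as the offset. The device node
+ * name, buf_id and size are assumptions.
+ */
+#if 0
+	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+			hwmem_fd, (off_t)buf_id);
+#endif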
+
+int __init hwmem_ioctl_init(void)
+{
+ if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 ||
+ sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 ||
+ sizeof(enum hwmem_access) != 4 ||
+ sizeof(enum hwmem_mem_type) != 4) {
+ dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT"
+ " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||"
+ " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum"
+ " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)"
+ " != 4\n");
+ return -ENOMSG;
+ }
+ if (PAGE_SHIFT > 15)
+ dev_warn(hwmem_device.this_device, "Due to the page size only"
+ " %u id:s per file instance are available\n",
+ ((u32)1 << (31 - PAGE_SHIFT)) - 1);
+
+ return misc_register(&hwmem_device);
+}
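+
+/*
+ * Worked example of the warning above: with 4 KiB pages (PAGE_SHIFT == 12)
+ * each file instance can hold (1 << (31 - 12)) - 1 = 524287 ids, while
+ * 64 KiB pages (PAGE_SHIFT == 16) leave only (1 << 15) - 1 = 32767 ids,
+ * which is why the warning only triggers for PAGE_SHIFT > 15.
+ */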
+
+void __exit hwmem_ioctl_exit(void)
+{
+ misc_deregister(&hwmem_device);
+}
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
new file mode 100644
index 00000000000..b91d99bc2be
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pid.h>
+#include <linux/list.h>
+#include <linux/hwmem.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
+#include "cache_handler.h"
+
+#define S32_MAX 2147483647
+
+struct hwmem_alloc_threadg_info {
+ struct list_head list;
+
+ struct pid *threadg_pid; /* Ref counted */
+
+ enum hwmem_access access;
+};
+
+struct hwmem_alloc {
+ struct list_head list;
+
+ atomic_t ref_cnt;
+
+ enum hwmem_alloc_flags flags;
+ struct hwmem_mem_type_struct *mem_type;
+
+ void *allocator_hndl;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ s32 name;
+
+ /* Access control */
+ enum hwmem_access default_access;
+ struct list_head threadg_info_list;
+
+ /* Cache handling */
+ struct cach_buf cach_buf;
+
+#ifdef CONFIG_DEBUG_FS
+ /* Debug */
+ void *creator;
+ pid_t creator_tgid;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static struct platform_device *hwdev;
+
+static LIST_HEAD(alloc_list);
+static DEFINE_IDR(global_idr);
+static DEFINE_MUTEX(lock);
+
+static void vm_open(struct vm_area_struct *vma);
+static void vm_close(struct vm_area_struct *vma);
+static struct vm_operations_struct vm_ops = {
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static void kunmap_alloc(struct hwmem_alloc *alloc);
+
+/* Helpers */
+
+static void destroy_alloc_threadg_info(
+ struct hwmem_alloc_threadg_info *info)
+{
+ if (info->threadg_pid)
+ put_pid(info->threadg_pid);
+
+ kfree(info);
+}
+
+static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct hwmem_alloc_threadg_info *tmp;
+
+ list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list),
+ list) {
+ list_del(&info->list);
+ destroy_alloc_threadg_info(info);
+ }
+}
+
+static enum hwmem_access get_access(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *my_pid;
+ bool found = false;
+
+ my_pid = find_get_pid(task_tgid_nr(current));
+ if (!my_pid)
+ return 0;
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == my_pid) {
+ found = true;
+ break;
+ }
+ }
+
+ put_pid(my_pid);
+
+ if (found)
+ return info->access;
+ else
+ return alloc->default_access;
+}
+
+static void clear_alloc_mem(struct hwmem_alloc *alloc)
+{
+ cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_CPU, NULL);
+
+ memset(alloc->kaddr, 0, alloc->size);
+}
+
+static void destroy_alloc(struct hwmem_alloc *alloc)
+{
+ list_del(&alloc->list);
+
+ if (alloc->name != 0) {
+ idr_remove(&global_idr, alloc->name);
+ alloc->name = 0;
+ }
+
+ clean_alloc_threadg_info_list(alloc);
+
+ kunmap_alloc(alloc);
+
+ if (!IS_ERR_OR_NULL(alloc->allocator_hndl))
+ alloc->mem_type->allocator_api.free(
+ alloc->mem_type->allocator_instance,
+ alloc->allocator_hndl);
+
+ kfree(alloc);
+}
+
+static int kmap_alloc(struct hwmem_alloc *alloc)
+{
+ int ret;
+ pgprot_t pgprot;
+ void *alloc_kaddr;
+
+ alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr(
+ alloc->mem_type->allocator_instance, alloc->allocator_hndl);
+ if (IS_ERR(alloc_kaddr))
+ return PTR_ERR(alloc_kaddr);
+
+ pgprot = PAGE_KERNEL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot);
+
+ ret = ioremap_page_range((unsigned long)alloc_kaddr,
+ (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot);
+ if (ret < 0) {
+ dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr,
+ alloc->paddr + alloc->size);
+ return ret;
+ }
+
+ alloc->kaddr = alloc_kaddr;
+
+ return 0;
+}
+
+static void kunmap_alloc(struct hwmem_alloc *alloc)
+{
+ if (alloc->kaddr == NULL)
+ return;
+
+ unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size);
+
+ alloc->kaddr = NULL;
+}
+
+static struct hwmem_mem_type_struct *resolve_mem_type(
+ enum hwmem_mem_type mem_type)
+{
+ unsigned int i;
+ for (i = 0; i < hwmem_num_mem_types; i++) {
+ if (hwmem_mem_types[i].id == mem_type)
+ return &hwmem_mem_types[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/* HWMEM API */
+
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+
+ if (hwdev == NULL) {
+ printk(KERN_ERR "HWMEM: Badly configured\n");
+ return ERR_PTR(-ENOMSG);
+ }
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ size = PAGE_ALIGN(size);
+
+ alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto alloc_alloc_failed;
+ }
+
+ INIT_LIST_HEAD(&alloc->list);
+ atomic_inc(&alloc->ref_cnt);
+ alloc->flags = flags;
+ alloc->default_access = def_access;
+ INIT_LIST_HEAD(&alloc->threadg_info_list);
+#ifdef CONFIG_DEBUG_FS
+ alloc->creator = __builtin_return_address(0);
+ alloc->creator_tgid = task_tgid_nr(current);
+#endif
+ alloc->mem_type = resolve_mem_type(mem_type);
+ if (IS_ERR(alloc->mem_type)) {
+ ret = PTR_ERR(alloc->mem_type);
+ goto resolve_mem_type_failed;
+ }
+
+ alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc(
+ alloc->mem_type->allocator_instance, size);
+ if (IS_ERR(alloc->allocator_hndl)) {
+ ret = PTR_ERR(alloc->allocator_hndl);
+ goto allocator_failed;
+ }
+
+ alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr(
+ alloc->allocator_hndl);
+ alloc->size = alloc->mem_type->allocator_api.get_alloc_size(
+ alloc->allocator_hndl);
+
+ cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size);
+ ret = kmap_alloc(alloc);
+ if (ret < 0)
+ goto kmap_alloc_failed;
+ cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr);
+
+ list_add_tail(&alloc->list, &alloc_list);
+
+ clear_alloc_mem(alloc);
+
+ goto out;
+
+kmap_alloc_failed:
+allocator_failed:
+resolve_mem_type_failed:
+ destroy_alloc(alloc);
+alloc_alloc_failed:
+ alloc = ERR_PTR(ret);
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_alloc);
+
+void hwmem_release(struct hwmem_alloc *alloc)
+{
+ mutex_lock(&lock);
+
+ if (atomic_dec_and_test(&alloc->ref_cnt))
+ destroy_alloc(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_release);
+
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ mutex_lock(&lock);
+
+ cach_set_domain(&alloc->cach_buf, access, domain, region);
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_set_domain);
+
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ u32 *mem_chunks_length)
+{
+ if (*mem_chunks_length < 1) {
+ *mem_chunks_length = 1;
+ return -ENOSPC;
+ }
+
+ mutex_lock(&lock);
+
+ mem_chunks[0].paddr = alloc->paddr;
+ mem_chunks[0].size = alloc->size;
+ *mem_chunks_length = 1;
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_pin);
+
+void hwmem_unpin(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_unpin);
+
+static void vm_open(struct vm_area_struct *vma)
+{
+ atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ hwmem_release((struct hwmem_alloc *)vma->vm_private_data);
+}
+
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma)
+{
+ int ret = 0;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ enum hwmem_access access;
+ mutex_lock(&lock);
+
+ access = get_access(alloc);
+
+ /* Check permissions */
+ if ((!(access & HWMEM_ACCESS_WRITE) &&
+ (vma->vm_flags & VM_WRITE)) ||
+ (!(access & HWMEM_ACCESS_READ) &&
+ (vma->vm_flags & VM_READ))) {
+ ret = -EPERM;
+ goto illegal_access;
+ }
+
+ if (vma_size > alloc->size) {
+ ret = -EINVAL;
+ goto illegal_size;
+ }
+
+ /*
+ * We don't want Linux to do anything (merging etc) with our VMAs as
+ * the offset is not necessarily valid
+ */
+ vma->vm_flags |= VM_SPECIAL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot);
+ vma->vm_private_data = (void *)alloc;
+ atomic_inc(&alloc->ref_cnt);
+ vma->vm_ops = &vm_ops;
+
+ ret = remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT,
+ min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot);
+ if (ret < 0)
+ goto map_failed;
+
+ goto out;
+
+map_failed:
+ atomic_dec(&alloc->ref_cnt);
+illegal_size:
+illegal_access:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_mmap);
+
+void *hwmem_kmap(struct hwmem_alloc *alloc)
+{
+ void *ret;
+
+ mutex_lock(&lock);
+
+ ret = alloc->kaddr;
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_kmap);
+
+void hwmem_kunmap(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_kunmap);
+
+int hwmem_set_access(struct hwmem_alloc *alloc,
+ enum hwmem_access access, pid_t pid_nr)
+{
+ int ret;
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *pid;
+ bool found = false;
+
+ pid = find_get_pid(pid_nr);
+ if (!pid) {
+ ret = -EINVAL;
+ goto error_get_pid;
+ }
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == pid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ info->threadg_pid = pid;
+ info->access = access;
+
+ list_add_tail(&(info->list), &(alloc->threadg_info_list));
+	} else {
+		info->access = access;
+		put_pid(pid);
+	}
+
+ return 0;
+
+error_alloc_info:
+ put_pid(pid);
+error_get_pid:
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_set_access);
+
+void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access)
+{
+ mutex_lock(&lock);
+
+ if (size != NULL)
+ *size = alloc->size;
+ if (mem_type != NULL)
+ *mem_type = alloc->mem_type->id;
+ if (access != NULL)
+ *access = get_access(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_get_info);
+
+s32 hwmem_get_name(struct hwmem_alloc *alloc)
+{
+ int ret = 0, name;
+
+ mutex_lock(&lock);
+
+ if (alloc->name != 0) {
+ ret = alloc->name;
+ goto out;
+ }
+
+ while (true) {
+ if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto pre_get_id_failed;
+ }
+
+ ret = idr_get_new_above(&global_idr, alloc, 1, &name);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto get_id_failed;
+ }
+
+ if (name > S32_MAX) {
+ ret = -ENOMSG;
+ goto overflow;
+ }
+
+ alloc->name = name;
+
+ ret = name;
+ goto out;
+
+overflow:
+ idr_remove(&global_idr, name);
+get_id_failed:
+pre_get_id_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_get_name);
+
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name)
+{
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&lock);
+
+ alloc = idr_find(&global_idr, name);
+ if (alloc == NULL) {
+ alloc = ERR_PTR(-EINVAL);
+ goto find_failed;
+ }
+ atomic_inc(&alloc->ref_cnt);
+
+ goto out;
+
+find_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_resolve_by_name);
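+
+/*
+ * Hypothetical in-kernel usage sketch of the API exported above; the size,
+ * the zero flags value and the error handling are illustrative only.
+ */
+#if 0
+static int hwmem_usage_example(void)
+{
+	struct hwmem_alloc *alloc;
+	struct hwmem_mem_chunk chunk;
+	u32 nr_chunks = 1;
+	void *vaddr;
+	int ret;
+
+	alloc = hwmem_alloc(SZ_1M, 0,
+			HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+			HWMEM_MEM_CONTIGUOUS_SYS);
+	if (IS_ERR(alloc))
+		return PTR_ERR(alloc);
+
+	ret = hwmem_pin(alloc, &chunk, &nr_chunks);
+	if (ret < 0)
+		goto out;
+
+	vaddr = hwmem_kmap(alloc);	/* CPU access via the kernel mapping */
+	memset(vaddr, 0xff, SZ_1M);
+	hwmem_kunmap(alloc);
+
+	hwmem_unpin(alloc);		/* chunk.paddr was valid until here */
+out:
+	hwmem_release(alloc);
+	return ret;
+}
+#endif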
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size)
+{
+ int ret;
+ char creator[KSYM_SYMBOL_LEN];
+ int i;
+
+ if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0)
+ creator[0] = '\0';
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l,
+ "%#x\n"
+ "\tSize: %u\n"
+ "\tMemory type: %u\n"
+ "\tName: %#x\n"
+ "\tReference count: %i\n"
+ "\tAllocation flags: %#x\n"
+ "\t$ settings: %#x\n"
+ "\tDefault access: %#x\n"
+ "\tPhysical address: %#x\n"
+ "\tKernel virtual address: %#x\n"
+ "\tCreator: %s\n"
+ "\tCreator thread group id: %u\n",
+ (unsigned int)alloc, alloc->size, alloc->mem_type->id,
+ alloc->name, atomic_read(&alloc->ref_cnt),
+ alloc->flags, alloc->cach_buf.cache_settings,
+ alloc->default_access, alloc->paddr,
+ (unsigned int)alloc->kaddr, creator,
+ alloc->creator_tgid);
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /*
+	 * We assume the supplied buffer and PAGE_SIZE are large enough to
+	 * hold information about at least one alloc; if not, no data will
+	 * be returned.
+ */
+
+ int ret;
+ size_t i = 0;
+ struct hwmem_alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data is initialized to NULL in open; we rely on NULL being 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_alloc, &alloc_list, list) {
+ if (i++ < (size_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(curr_alloc, &local_buf_pos, available_space -
+ (size_t)(local_buf_pos - local_buf));
+ if (ret == -EINVAL) /* No more room */
+ break;
+ else if (ret < 0)
+ goto out;
+
+ *curr_pos = (void *)i;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	if (copy_to_user(buf, local_buf, bytes_read)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static void init_debugfs(void)
+{
+	/* Hwmem is never unloaded so dropping the dentries is ok. */
+ struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL);
+ (void)debugfs_create_file("allocs", 0444, debugfs_root_dir, 0,
+ &debugfs_allocs_fops);
+}
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+/* Module */
+
+extern int hwmem_ioctl_init(void);
+
+static int __devinit hwmem_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (hwdev) {
+ dev_err(&pdev->dev, "Probed multiple times\n");
+ return -EINVAL;
+ }
+
+ hwdev = pdev;
+
+ /*
+	 * No need to flush the caches here. If we can keep track of the
+	 * cache contents then none of our memory will be in the caches; if
+	 * we can't keep track of the cache contents, we always assume all
+	 * our memory is in the caches.
+ */
+
+ ret = hwmem_ioctl_init();
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing"
+ " anyway\n");
+
+#ifdef CONFIG_DEBUG_FS
+ init_debugfs();
+#endif
+
+ dev_info(&pdev->dev, "Probed OK\n");
+
+ return 0;
+}
+
+static struct platform_driver hwmem_driver = {
+ .probe = hwmem_probe,
+ .driver = {
+ .name = "hwmem",
+ },
+};
+
+static int __init hwmem_init(void)
+{
+ return platform_driver_register(&hwmem_driver);
+}
+subsys_initcall(hwmem_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hardware memory driver");
+
diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c
new file mode 100644
index 00000000000..a05b2d14573
--- /dev/null
+++ b/drivers/misc/stm.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Philippe Langlais <philippe.Langlais@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <trace/stm.h>
+
+/* STM Registers */
+#define STM_CR (stm.virtbase)
+#define STM_MMC (stm.virtbase + 0x008)
+#define STM_TER (stm.virtbase + 0x010)
+#define STMPERIPHID0 (stm.virtbase + 0xFC0)
+#define STMPERIPHID1 (stm.virtbase + 0xFC8)
+#define STMPERIPHID2 (stm.virtbase + 0xFD0)
+#define STMPERIPHID3 (stm.virtbase + 0xFD8)
+#define STMPCELLID0 (stm.virtbase + 0xFE0)
+#define STMPCELLID1 (stm.virtbase + 0xFE8)
+#define STMPCELLID2 (stm.virtbase + 0xFF0)
+#define STMPCELLID3 (stm.virtbase + 0xFF8)
+
+#define STM_CLOCK_SHIFT 6
+#define STM_CLOCK_MASK 0x1C0
+
+/* Hardware mode for all sources */
+#define STM_MMC_DEFAULT 0xFFFFFFFF
+
+/* Max number of channels (multiple of 256) */
+#define STM_NUMBER_OF_CHANNEL CONFIG_STM_NUMBER_OF_CHANNEL
+
+/* Number of dynamically allocated channels used by stm_trace_buffer() */
+#define NB_KERNEL_DYNAMIC_CHANNEL 128
+
+static struct stm_device {
+ const struct stm_platform_data *pdata;
+ void __iomem *virtbase;
+ /* Used to register the allocated channels */
+ DECLARE_BITMAP(ch_bitmap, STM_NUMBER_OF_CHANNEL);
+} stm;
+
+volatile struct stm_channel __iomem *stm_channels;
+
+static struct cdev cdev;
+static struct class *stm_class;
+static int stm_major;
+
+static DEFINE_SPINLOCK(lock);
+
+/* Middle value for clock divisor */
+static enum clock_div stm_clockdiv = STM_CLOCK_DIV8;
+
+/* Default value for STM output connection */
+static enum stm_connection_type stm_connection = STM_DEFAULT_CONNECTION;
+
+#define STM_BUFSIZE 256
+struct channel_data {
+ DECLARE_BITMAP(bitmap, STM_NUMBER_OF_CHANNEL);
+ int numero;
+ spinlock_t lock;
+ u8 data_buffer[STM_BUFSIZE];
+};
+
+static u64 stm_printk_buf[1024/sizeof(u64)];
+static arch_spinlock_t stm_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+int stm_alloc_channel(int offset)
+{
+ int channel;
+
+ /* Look for a free channel from offset */
+ do {
+ channel = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ } while ((channel < STM_NUMBER_OF_CHANNEL)
+ && test_and_set_bit(channel, stm.ch_bitmap));
+ return channel;
+}
+EXPORT_SYMBOL(stm_alloc_channel);
+
+void stm_free_channel(int channel)
+{
+ clear_bit(channel, stm.ch_bitmap);
+}
+EXPORT_SYMBOL(stm_free_channel);
+
+static int stm_get_channel(struct channel_data *ch_data, int __user *arg)
+{
+ int channel, err;
+
+ channel = stm_alloc_channel(0);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+		/* One free channel found! */
+ err = put_user(channel, arg);
+ if (err)
+ stm_free_channel(channel);
+ else
+ /* Register it in the context of the file */
+ set_bit(channel, ch_data->bitmap);
+ } else
+ err = -ENOMEM;
+ return err;
+}
+
+static int stm_release_channel(struct channel_data *ch_data, int channel)
+{
+ if ((channel < 0) || (channel >= STM_NUMBER_OF_CHANNEL))
+ return -EINVAL;
+ stm_free_channel(channel);
+ clear_bit(channel, ch_data->bitmap);
+ return 0;
+}
+
+/*
+ * Trace a buffer on a given channel
+ * with automatic time stamping on the last byte(s) only
+ */
+int stm_trace_buffer_onchannel(int channel,
+ const void *data, size_t length)
+{
+ int i, mod64;
+ volatile struct stm_channel __iomem *pch;
+
+ if (channel >= STM_NUMBER_OF_CHANNEL || !stm_channels)
+ return 0;
+
+ pch = &stm_channels[channel];
+
+ /* Align data pointer to u64 & time stamp last byte(s) */
+ mod64 = (int)data & 7;
+ i = length - 8 + mod64;
+ switch (mod64) {
+ case 0:
+ if (i)
+ pch->no_stamp64 = *(u64 *)data;
+ else {
+ pch->stamp64 = *(u64 *)data;
+ return length;
+ }
+ data += 8;
+ break;
+ case 1:
+ pch->no_stamp8 = *(u8 *)data;
+ pch->no_stamp16 = *(u16 *)(data+1);
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+3);
+ else {
+ pch->stamp32 = *(u32 *)(data+3);
+ return length;
+ }
+ data += 7;
+ break;
+ case 2:
+ pch->no_stamp16 = *(u16 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+2);
+ else {
+ pch->stamp32 = *(u32 *)(data+2);
+ return length;
+ }
+ data += 6;
+ break;
+ case 3:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+1);
+ else {
+ pch->stamp32 = *(u32 *)(data+1);
+ return length;
+ }
+ data += 5;
+ break;
+ case 4:
+ if (i)
+ pch->no_stamp32 = *(u32 *)data;
+ else {
+ pch->stamp32 = *(u32 *)data;
+ return length;
+ }
+ data += 4;
+ break;
+ case 5:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp16 = *(u16 *)(data+1);
+ else {
+ pch->stamp16 = *(u16 *)(data+1);
+ return length;
+ }
+ data += 3;
+ break;
+ case 6:
+ if (i)
+ pch->no_stamp16 = *(u16 *)data;
+ else {
+ pch->stamp16 = *(u16 *)data;
+ return length;
+ }
+ data += 2;
+ break;
+ case 7:
+ if (i)
+ pch->no_stamp8 = *(u8 *)data;
+ else {
+ pch->stamp8 = *(u8 *)data;
+ return length;
+ }
+ data++;
+ break;
+ }
+ for (;;) {
+ if (i > 8) {
+ pch->no_stamp64 = *(u64 *)data;
+ data += 8;
+ i -= 8;
+ } else if (i == 8) {
+ pch->stamp64 = *(u64 *)data;
+ break;
+ } else if (i > 4) {
+ pch->no_stamp32 = *(u32 *)data;
+ data += 4;
+ i -= 4;
+ } else if (i == 4) {
+ pch->stamp32 = *(u32 *)data;
+ break;
+ } else if (i > 2) {
+ pch->no_stamp16 = *(u16 *)data;
+ data += 2;
+ i -= 2;
+ } else if (i == 2) {
+ pch->stamp16 = *(u16 *)data;
+ break;
+ } else {
+ pch->stamp8 = *(u8 *)data;
+ break;
+ }
+ }
+ return length;
+}
+EXPORT_SYMBOL(stm_trace_buffer_onchannel);
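+
+/*
+ * Hypothetical usage sketch of the channel API above: reserve a channel,
+ * trace a buffer on it (the hardware time stamps the final access only),
+ * then release the channel. data and length are illustrative.
+ */
+#if 0
+	int ch = stm_alloc_channel(0);
+
+	if (ch < STM_NUMBER_OF_CHANNEL) {
+		stm_trace_buffer_onchannel(ch, data, length);
+		stm_free_channel(ch);
+	}
+#endif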
+
+static int stm_open(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel_data;
+
+ channel_data = kzalloc(sizeof(struct channel_data), GFP_KERNEL);
+ if (channel_data == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&channel_data->lock);
+ channel_data->numero = -1; /* Channel not yet allocated */
+ file->private_data = channel_data;
+
+ return 0;
+}
+
+static int stm_release(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel;
+
+ channel = (struct channel_data *)file->private_data;
+
+ /* Free allocated channel if necessary */
+ if (channel->numero != -1)
+ stm_free_channel(channel->numero);
+
+ bitmap_andnot(stm.ch_bitmap, stm.ch_bitmap,
+ channel->bitmap, STM_NUMBER_OF_CHANNEL);
+
+ kfree(channel);
+ return 0;
+}
+
+static ssize_t stm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct channel_data *channel = file->private_data;
+
+	/* Allocate a channel on first write */
+	if (channel->numero == -1) {
+		channel->numero = stm_alloc_channel(0);
+		if (channel->numero >= STM_NUMBER_OF_CHANNEL)
+ return -ENOMEM;
+ }
+
+ if (size > STM_BUFSIZE)
+ size = STM_BUFSIZE;
+
+ spin_lock(&channel->lock);
+
+ if (copy_from_user
+ (channel->data_buffer, (void __user *) buf, size)) {
+ spin_unlock(&channel->lock);
+ return -EFAULT;
+ }
+ size = stm_trace_buffer_onchannel(channel->numero,
+ channel->data_buffer, size);
+
+ spin_unlock(&channel->lock);
+
+ return size;
+}
+
+static int stm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Don't allow a mapping that covers more than the STM channels
+ */
+ if ((vma->vm_end - vma->vm_start) >
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel))
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ stm.pdata->channels_phys_base>>PAGE_SHIFT,
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel),
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/* Enable the trace for given sources (bitfield) */
+static void stm_enable_src(unsigned int v)
+{
+ unsigned int cr_val;
+ spin_lock(&lock);
+ cr_val = readl(STM_CR);
+ cr_val &= ~STM_CLOCK_MASK;
+ writel(cr_val|(stm_clockdiv<<STM_CLOCK_SHIFT), STM_CR);
+ writel(v, STM_TER);
+ spin_unlock(&lock);
+}
+
+/* Disable all sources */
+static void stm_disable_src(void)
+{
+ writel(0x0, STM_CR); /* stop clock */
+ writel(0x0, STM_TER); /* Disable cores */
+}
+
+/* Set clock speed */
+static int stm_set_ckdiv(enum clock_div v)
+{
+ unsigned int val;
+
+ spin_lock(&lock);
+ val = readl(STM_CR);
+ val &= ~STM_CLOCK_MASK;
+ writel(val | ((v << STM_CLOCK_SHIFT) & STM_CLOCK_MASK), STM_CR);
+ spin_unlock(&lock);
+ stm_clockdiv = v;
+
+ return 0;
+}
+
+/* Return the control register */
+static inline unsigned int stm_get_cr(void)
+{
+ return readl(STM_CR);
+}
+
+/*
+ * Set Trace MODE lossless/lossy (Software/Hardware);
+ * each bit selects the mode of the corresponding source.
+ */
+static inline void stm_set_modes(unsigned int modes)
+{
+ writel(modes, STM_MMC);
+}
+
+/*
+ * Get Trace MODE lossless/lossy (Software/Hardware);
+ * each bit reflects the mode of the corresponding source.
+ */
+static inline unsigned int stm_get_modes(void)
+{
+ return readl(STM_MMC);
+}
+
+/* Count # of free channels */
+static int stm_nb_free_channels(void)
+{
+ int nb_channels, offset;
+
+ nb_channels = 0;
+ offset = 0;
+ for (;;) {
+ offset = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ if (offset == STM_NUMBER_OF_CHANNEL)
+ break;
+ offset++;
+ nb_channels++;
+ }
+ return nb_channels;
+}
+
+static long stm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct channel_data *channel = file->private_data;
+
+ switch (cmd) {
+
+ case STM_CONNECTION:
+ if (stm.pdata->stm_connection)
+ stm.pdata->stm_connection(arg);
+ stm_connection = arg;
+ break;
+
+ case STM_DISABLE:
+ stm_disable_src();
+ break;
+
+ case STM_GET_NB_MAX_CHANNELS:
+ err = put_user(STM_NUMBER_OF_CHANNEL, (unsigned int *)arg);
+ break;
+
+ case STM_GET_NB_FREE_CHANNELS:
+ err = put_user(stm_nb_free_channels(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CHANNEL_NO:
+ err = put_user(channel->numero, (unsigned int *)arg);
+ break;
+
+ case STM_SET_CLOCK_DIV:
+ err = stm_set_ckdiv((enum clock_div) arg);
+ break;
+
+ case STM_SET_MODE:
+ stm_set_modes(arg);
+ break;
+
+ case STM_GET_MODE:
+ err = put_user(stm_get_modes(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CTRL_REG:
+ err = put_user(stm_get_cr(), (unsigned int *)arg);
+ break;
+
+ case STM_ENABLE_SRC:
+ stm_enable_src(arg);
+ break;
+
+ case STM_GET_FREE_CHANNEL:
+ err = stm_get_channel(channel, (int *)arg);
+ break;
+
+ case STM_RELEASE_CHANNEL:
+ err = stm_release_channel(channel, arg);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Trace a buffer on a dynamically allocated channel,
+ * with automatic time stamping on the last byte(s) only.
+ * Dynamic channel numbers are >=
+ * STM_NUMBER_OF_CHANNEL - NB_KERNEL_DYNAMIC_CHANNEL
+ */
+int stm_trace_buffer(const void *data, size_t length)
+{
+ int channel;
+
+ channel = stm_alloc_channel(STM_NUMBER_OF_CHANNEL
+ - NB_KERNEL_DYNAMIC_CHANNEL);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+ length = stm_trace_buffer_onchannel(channel, data, length);
+ stm_free_channel(channel);
+ return length;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(stm_trace_buffer);
+
+static const struct file_operations stm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = stm_ioctl,
+ .open = stm_open,
+ .llseek = no_llseek,
+ .write = stm_write,
+ .release = stm_release,
+ .mmap = stm_mmap,
+};
+
+/*
+ * Init and deinit driver
+ */
+
+static int __devinit stm_probe(struct platform_device *pdev)
+{
+ int retval = 0;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ pr_alert("No device/platform_data found on STM driver\n");
+ return -ENODEV;
+ }
+
+ stm.pdata = pdev->dev.platform_data;
+
+ cdev_init(&cdev, &stm_fops);
+ cdev.owner = THIS_MODULE;
+
+ stm_channels =
+ ioremap_nocache(stm.pdata->channels_phys_base,
+ STM_NUMBER_OF_CHANNEL*sizeof(*stm_channels));
+ if (stm_channels == NULL) {
+ dev_err(&pdev->dev, "could not remap STM Msg register\n");
+ return -ENODEV;
+ }
+
+ stm.virtbase = ioremap_nocache(stm.pdata->regs_phys_base, SZ_4K);
+ if (stm.virtbase == NULL) {
+ retval = -EIO;
+ dev_err(&pdev->dev, "could not remap STM Register\n");
+ goto err_channels;
+ }
+
+ retval = cdev_add(&cdev, MKDEV(stm_major, 0), 1);
+ if (retval) {
+ dev_err(&pdev->dev, "chardev registration failed\n");
+ goto err_channels;
+ }
+
+ if (IS_ERR(device_create(stm_class, &pdev->dev,
+ MKDEV(stm_major, 0), NULL, STM_DEV_NAME)))
+ dev_err(&pdev->dev, "can't create device\n");
+
+ /* Check chip IDs if necessary */
+ if (stm.pdata->id_mask) {
+ u32 periph_id, cell_id;
+
+ periph_id = (readb(STMPERIPHID3)<<24) +
+ (readb(STMPERIPHID2)<<16) +
+ (readb(STMPERIPHID1)<<8) +
+ readb(STMPERIPHID0);
+ cell_id = (readb(STMPCELLID3)<<24) +
+ (readb(STMPCELLID2)<<16) +
+ (readb(STMPCELLID1)<<8) +
+ readb(STMPCELLID0);
+		/* Only warn if it isn't an ST-Ericsson supported one */
+ if ((periph_id & stm.pdata->id_mask) != 0x00080dec ||
+ cell_id != 0xb105f00d) {
+ dev_warn(&pdev->dev, "STM-Trace IC not compatible\n");
+ dev_warn(&pdev->dev, "periph_id=%x\n", periph_id);
+ dev_warn(&pdev->dev, "pcell_id=%x\n", cell_id);
+ }
+ }
+
+ /* Reserve channels if necessary */
+ if (stm.pdata->channels_reserved_sz) {
+ int i;
+
+ for (i = 0; i < stm.pdata->channels_reserved_sz; i++) {
+ set_bit(stm.pdata->channels_reserved[i],
+ stm.ch_bitmap);
+ }
+ }
+ /* Reserve kernel trace channels on demand */
+#ifdef CONFIG_STM_PRINTK
+ set_bit(CONFIG_STM_PRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_FTRACE
+ set_bit(CONFIG_STM_FTRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_CTX_SWITCH
+ set_bit(CONFIG_STM_CTX_SWITCH_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_WAKEUP
+ set_bit(CONFIG_STM_WAKEUP_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_STACK_TRACE
+ set_bit(CONFIG_STM_STACK_TRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_TRACE_PRINTK
+ set_bit(CONFIG_STM_TRACE_PRINTK_CHANNEL, stm.ch_bitmap);
+ set_bit(CONFIG_STM_TRACE_BPRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+
+ if (stm.pdata->stm_connection) {
+ retval = stm.pdata->stm_connection(stm_connection);
+ if (retval) {
+ dev_err(&pdev->dev, "failed to connect STM output\n");
+ goto err_channels;
+ }
+ }
+
+ /* Enable STM Masters given in pdata */
+ if (stm.pdata->masters_enabled)
+ stm_enable_src(stm.pdata->masters_enabled);
+
+ stm_set_modes(STM_MMC_DEFAULT); /* Set all sources in HW mode */
+
+ dev_info(&pdev->dev, "STM-Trace driver probed successfully\n");
+ stm_printk("STM-Trace driver initialized\n");
+ return 0;
+
+err_channels:
+ iounmap(stm_channels);
+ return retval;
+}
+
+static int __devexit stm_remove(struct platform_device *pdev)
+{
+ device_destroy(stm_class, MKDEV(stm_major, 0));
+ cdev_del(&cdev);
+
+ if (stm.pdata->stm_connection)
+ (void) stm.pdata->stm_connection(STM_DISCONNECT);
+
+ stm_disable_src();
+ iounmap(stm.virtbase);
+ iounmap(stm_channels);
+
+ return 0;
+}
+
+int stm_printk(const char *fmt, ...)
+{
+ int ret;
+ size_t size;
+ va_list args;
+
+ va_start(args, fmt);
+ arch_spin_lock(&stm_buf_lock);
+ size = vscnprintf((char *)stm_printk_buf,
+ sizeof(stm_printk_buf), fmt, args);
+ ret = stm_trace_buffer(stm_printk_buf, size);
+ arch_spin_unlock(&stm_buf_lock);
+ va_end(args);
+ return ret;
+}
+EXPORT_SYMBOL(stm_printk);
+
+/*
+ * Debugfs interface
+ */
+
+static int stm_connection_show(void *data, u64 *val)
+{
+ *val = stm_connection;
+ return 0;
+}
+
+static int stm_connection_set(void *data, u64 val)
+{
+ if (stm.pdata->stm_connection) {
+ stm_connection = val;
+ stm.pdata->stm_connection(val);
+ }
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_connection_fops, stm_connection_show,
+ stm_connection_set, "%llu\n");
+
+static int stm_clockdiv_show(void *data, u64 *val)
+{
+ *val = stm_clockdiv;
+ return 0;
+}
+
+static int stm_clockdiv_set(void *data, u64 val)
+{
+ stm_set_ckdiv(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_clockdiv_fops, stm_clockdiv_show,
+ stm_clockdiv_set, "%llu\n");
+
+static int stm_masters_enable_show(void *data, u64 *val)
+{
+ *val = readl(STM_TER);
+ return 0;
+}
+
+static int stm_masters_enable_set(void *data, u64 val)
+{
+ stm_enable_src(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_enable_fops, stm_masters_enable_show,
+ stm_masters_enable_set, "%08llx\n");
+
+static int stm_masters_modes_show(void *data, u64 *val)
+{
+ *val = stm_get_modes();
+ return 0;
+}
+
+static int stm_masters_modes_set(void *data, u64 val)
+{
+ stm_set_modes(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_modes_fops, stm_masters_modes_show,
+ stm_masters_modes_set, "%08llx\n");
+
+/* Count # of free channels */
+static int stm_free_channels_show(void *data, u64 *val)
+{
+ *val = stm_nb_free_channels();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_free_channels_fops, stm_free_channels_show,
+ NULL, "%lld\n");
+
+static __init int stm_init_debugfs(void)
+{
+ struct dentry *d_stm;
+
+ d_stm = debugfs_create_dir(STM_DEV_NAME, NULL);
+ if (!d_stm)
+ return -ENOMEM;
+
+ (void) debugfs_create_file("connection", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_connection_fops);
+ (void) debugfs_create_file("clockdiv", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_clockdiv_fops);
+ (void) debugfs_create_file("masters_enable", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_enable_fops);
+ (void) debugfs_create_file("masters_modes", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_modes_fops);
+ (void) debugfs_create_file("free_channels", S_IRUGO, d_stm,
+ NULL, &stm_free_channels_fops);
+ return 0;
+}
+fs_initcall(stm_init_debugfs);
+
+static struct platform_driver stm_driver = {
+ .probe = stm_probe,
+ .remove = __devexit_p(stm_remove),
+ .driver = {
+ .name = STM_DEV_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init stm_init(void)
+{
+ int retval;
+ dev_t dev;
+
+ stm_class = class_create(THIS_MODULE, STM_DEV_NAME);
+ if (IS_ERR(stm_class)) {
+ pr_err("stm: can't register stm class\n");
+ return PTR_ERR(stm_class);
+ }
+
+ retval = alloc_chrdev_region(&dev, 0, 1, STM_DEV_NAME);
+ if (retval) {
+ pr_err("stm: can't register character device\n");
+ class_destroy(stm_class);
+ return retval;
+ }
+ stm_major = MAJOR(dev);
+ return platform_driver_register(&stm_driver);
+}
+
+static void __exit stm_exit(void)
+{
+ platform_driver_unregister(&stm_driver);
+ unregister_chrdev_region(MKDEV(stm_major, 0), 1);
+ class_destroy(stm_class);
+}
+
+arch_initcall(stm_init); /* Init STM as early as possible, but after GPIO init */
+module_exit(stm_exit);
+
+MODULE_AUTHOR("Paul Ghaleb - ST Microelectronics");
+MODULE_AUTHOR("Pierre Peiffer - ST-Ericsson");
+MODULE_AUTHOR("Philippe Langlais - ST-Ericsson");
+MODULE_DESCRIPTION("System Trace Module driver");
+MODULE_ALIAS("stm");
+MODULE_ALIAS("stm-trace");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1e0e27cbe98..9c4fdedb2cc 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -107,6 +107,8 @@ struct mmc_blk_data {
*/
unsigned int part_curr;
struct device_attribute force_ro;
+ struct device_attribute boot_partition_ro_lock;
+ int area_type;
};
static DEFINE_MUTEX(open_lock);
@@ -165,6 +167,72 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+#define EXT_CSD_BOOT_WP_PWR_WP_TEXT "pwr_ro"
+#define EXT_CSD_BOOT_WP_PERM_WP_TEXT "perm_ro"
+#define EXT_CSD_BOOT_WP_WP_DISABLED_TEXT "rw"
+static ssize_t boot_partition_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ const char *out_text;
+
+ if (card->ext_csd.boot_locked
+ & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ out_text = EXT_CSD_BOOT_WP_PERM_WP_TEXT;
+ else if (card->ext_csd.boot_locked
+ & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ out_text = EXT_CSD_BOOT_WP_PWR_WP_TEXT;
+ else
+ out_text = EXT_CSD_BOOT_WP_WP_DISABLED_TEXT;
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", out_text);
+	mmc_blk_put(md);
+
+	return ret;
+}
+
+static ssize_t boot_partition_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md;
+ struct mmc_card *card;
+ u8 set = 0;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ if (!strncmp(buf, EXT_CSD_BOOT_WP_PWR_WP_TEXT,
+ strlen(EXT_CSD_BOOT_WP_PWR_WP_TEXT)))
+ set = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+ else if (!strncmp(buf, EXT_CSD_BOOT_WP_PERM_WP_TEXT,
+ strlen(EXT_CSD_BOOT_WP_PERM_WP_TEXT)))
+ set = EXT_CSD_BOOT_WP_B_PERM_WP_EN;
+
+ if (set) {
+ mmc_claim_host(card->host);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BOOT_WP,
+ set,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("Boot Partition Lock failed: %d\n", ret);
+ else
+ card->ext_csd.boot_locked = set;
+
+ mmc_release_host(card->host);
+
+ if (!ret)
+ set_disk_ro(md->disk, 1);
+ }
+ ret = count;
+
+ mmc_blk_put(md);
+ return ret;
+}
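+
+/*
+ * Hypothetical userspace sketch of driving the attribute above; the sysfs
+ * path depends on the card and boot partition and is an assumption here.
+ */
+#if 0
+	int fd = open("/sys/block/mmcblk0boot0/boot_partition_ro_lock",
+			O_WRONLY);
+	write(fd, "pwr_ro", strlen("pwr_ro"));	/* or "perm_ro" */
+	close(fd);
+#endif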
+
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1339,7 +1407,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
bool default_ro,
- const char *subname)
+ const char *subname,
+ int area_type)
{
struct mmc_blk_data *md;
int devidx, ret;
@@ -1364,10 +1433,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
if (!subname) {
md->name_idx = find_first_zero_bit(name_use, max_devices);
__set_bit(md->name_idx, name_use);
- }
- else
+ } else {
md->name_idx = ((struct mmc_blk_data *)
dev_to_disk(parent)->private_data)->name_idx;
+ md->area_type = area_type;
+ }
/*
* Set the read-only status based on the supported commands
@@ -1462,7 +1532,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
- md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
+ md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, false);
return md;
}
@@ -1471,13 +1541,14 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
unsigned int part_type,
sector_t size,
bool default_ro,
- const char *subname)
+ const char *subname,
+ int area_type)
{
char cap_str[10];
struct mmc_blk_data *part_md;
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
- subname);
+ subname, area_type);
if (IS_ERR(part_md))
return PTR_ERR(part_md);
part_md->part_type = part_type;
@@ -1510,7 +1581,8 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
card->part[idx].part_cfg,
card->part[idx].size >> 9,
card->part[idx].force_ro,
- card->part[idx].name);
+ card->part[idx].name,
+ card->part[idx].area_type);
if (ret)
return ret;
}
@@ -1542,6 +1614,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
if (md) {
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ if (md->area_type == MMC_BLK_DATA_AREA_BOOT)
+ device_remove_file(disk_to_dev(md->disk),
+ &md->boot_partition_ro_lock);
/* Stop new requests from getting into the queue */
del_gendisk(md->disk);
@@ -1579,7 +1654,24 @@ static int mmc_add_disk(struct mmc_blk_data *md)
md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
if (ret)
- del_gendisk(md->disk);
+ goto force_ro_fail;
+
+ if (md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+ md->boot_partition_ro_lock.show = boot_partition_ro_lock_show;
+ md->boot_partition_ro_lock.store = boot_partition_ro_lock_store;
+ md->boot_partition_ro_lock.attr.name = "boot_partition_ro_lock";
+ md->boot_partition_ro_lock.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->boot_partition_ro_lock);
+ if (ret)
+ goto boot_partition_ro_lock_fail;
+ }
+ return ret;
+
+boot_partition_ro_lock_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+ del_gendisk(md->disk);
return ret;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d240427c124..6383bed053b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -340,6 +340,15 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
/*
+ * Note that the call to mmc_part_add defaults to read
+ * only. If this default assumption is changed, the call must
+ * take into account the value of boot_locked below.
+ */
+ card->ext_csd.boot_locked = ext_csd[EXT_CSD_BOOT_WP] &
+ (EXT_CSD_BOOT_WP_B_PERM_WP_EN |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN);
+
+ /*
* There are two boot regions of equal size, defined in
* multiples of 128K.
*/
@@ -348,7 +357,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
- "boot%d", idx, true);
+ "boot%d", idx, true, MMC_BLK_DATA_AREA_BOOT);
}
}
}
@@ -435,7 +444,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
- "gp%d", idx, false);
+ "gp%d", idx, false,
+ MMC_BLK_DATA_AREA_GP);
}
}
card->ext_csd.sec_trim_mult =
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0726e59fd41..974851f7e09 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
@@ -45,6 +46,7 @@ static unsigned int fmax = 515633;
* struct variant_data - MMCI variant-specific quirks
* @clkreg: default value for MCICLOCK register
* @clkreg_enable: enable value for MMCICLOCK register
+ * @dma_sdio_req_ctrl: enable value for DMAREQCTL register for SDIO write
* @datalength_bits: number of bits in the MMCIDATALENGTH register
* @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
* is asserted (likewise for RX)
@@ -53,28 +55,37 @@ static unsigned int fmax = 515633;
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
+ * @non_power_of_2_blksize: true if block sizes can be other than power of two
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @signal_direction: input/out direction of bus signals can be indicated
*/
struct variant_data {
unsigned int clkreg;
unsigned int clkreg_enable;
+ unsigned int dma_sdio_req_ctrl;
unsigned int datalength_bits;
unsigned int fifosize;
unsigned int fifohalfsize;
bool sdio;
bool st_clkdiv;
bool blksz_datactrl16;
+ bool non_power_of_2_blksize;
+ u32 pwrreg_powerup;
+ bool signal_direction;
};
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
};
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
};
static struct variant_data variant_u300 = {
@@ -83,6 +94,8 @@ static struct variant_data variant_u300 = {
.clkreg_enable = MCI_ST_U300_HWFCEN,
.datalength_bits = 16,
.sdio = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
};
static struct variant_data variant_ux500 = {
@@ -90,9 +103,12 @@ static struct variant_data variant_ux500 = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL,
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
};
static struct variant_data variant_ux500v2 = {
@@ -100,13 +116,42 @@ static struct variant_data variant_ux500v2 = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL,
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
+ .non_power_of_2_blksize = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
};
/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!host->variant->non_power_of_2_blksize &&
+ !is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ if (data->sg->offset & 3) {
+ dev_err(mmc_dev(host->mmc),
+			"unsupported alignment (0x%x)\n", data->sg->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
* This must be called with host->lock held
*/
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
@@ -166,14 +211,8 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- /*
- * Need to drop the host lock here; mmc_request_done may call
- * back into the driver...
- */
- spin_unlock(&host->lock);
pm_runtime_put(mmc_dev(host->mmc));
mmc_request_done(host->mmc, mrq);
- spin_lock(&host->lock);
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
@@ -398,8 +437,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!chan)
return -EINVAL;
- /* If less than or equal to the fifo size, don't bother with DMA */
- if (data->blksz * data->blocks <= variant->fifosize)
+ /*
+	 * If less than or equal to the FIFO size, don't bother with DMA.
+	 * SDIO transfers may not be 4-byte aligned; fall back to PIO.
+ */
+ if (data->blksz * data->blocks <= variant->fifosize ||
+ (data->blksz * data->blocks) & 3)
return -EINVAL;
device = chan->device;
@@ -434,6 +477,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
int ret;
struct mmc_data *data = host->data;
+ struct variant_data *variant = host->variant;
ret = mmci_dma_prep_data(host, host->data, NULL);
if (ret)
@@ -448,6 +492,11 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
datactrl |= MCI_DPSM_DMAENABLE;
+ /* Some hardware versions need special flags for SDIO DMA write */
+ if (variant->sdio && host->mmc->card && mmc_card_sdio(host->mmc->card)
+ && (data->flags & MMC_DATA_WRITE))
+ datactrl |= variant->dma_sdio_req_ctrl;
+
/* Trigger the DMA transfer */
writel(datactrl, host->base + MMCIDATACTRL);
@@ -492,6 +541,9 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (!data)
return;
+ if (mmci_validate_data(host, mrq->data))
+ return;
+
if (data->host_cookie) {
data->host_cookie = 0;
return;
@@ -594,7 +646,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(host->size, base + MMCIDATALENGTH);
blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
@@ -604,6 +655,34 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+	/* The ST Micro variants have a special bit to enable SDIO */
+ if (variant->sdio && host->mmc->card)
+ if (mmc_card_sdio(host->mmc->card)) {
+ /*
+			 * The ST Micro variants have a special bit
+ * to enable SDIO.
+ */
+ datactrl |= MCI_ST_DPSM_SDIOEN;
+
+ /*
+ * The ST Micro variant for SDIO transfer sizes
+			 * less than or equal to 8 bytes needs to have clock
+ * H/W flow control disabled. Since flow control is
+ * not really needed for anything that fits in the
+ * FIFO, we can disable it for any write smaller
+ * than the FIFO size.
+ */
+ if ((host->size <= variant->fifosize) &&
+ (data->flags & MMC_DATA_WRITE))
+ writel(readl(host->base + MMCICLOCK) &
+ ~variant->clkreg_enable,
+ host->base + MMCICLOCK);
+ else
+ writel(readl(host->base + MMCICLOCK) |
+ variant->clkreg_enable,
+ host->base + MMCICLOCK);
+ }
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
@@ -632,11 +711,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
- /* The ST Micro variants has a special bit to enable SDIO */
- if (variant->sdio && host->mmc->card)
- if (mmc_card_sdio(host->mmc->card))
- datactrl |= MCI_ST_DPSM_SDIOEN;
-
writel(datactrl, base + MMCIDATACTRL);
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
mmci_set_mask1(host, irqmask);
@@ -783,7 +857,18 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
if (count <= 0)
break;
- readsl(base + MMCIFIFO, ptr, count >> 2);
+ /*
+ * SDIO especially may want to receive something that is
+ * not divisible by 4 (as opposed to card sectors
+	 * etc). Therefore make sure to always read the last bytes
+ * while only doing full 32-bit reads towards the FIFO.
+ */
+ if (count < 4) {
+ unsigned char buf[4];
+ readsl(base + MMCIFIFO, buf, 1);
+ memcpy(ptr, buf, count);
+ } else
+ readsl(base + MMCIFIFO, ptr, count >> 2);
ptr += count;
remain -= count;
@@ -812,23 +897,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
count = min(remain, maxcnt);
/*
- * The ST Micro variant for SDIO transfer sizes
- * less then 8 bytes should have clock H/W flow
- * control disabled.
- */
- if (variant->sdio &&
- mmc_card_sdio(host->mmc->card)) {
- if (count < 8)
- writel(readl(host->base + MMCICLOCK) &
- ~variant->clkreg_enable,
- host->base + MMCICLOCK);
- else
- writel(readl(host->base + MMCICLOCK) |
- variant->clkreg_enable,
- host->base + MMCICLOCK);
- }
-
- /*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc), and the FIFO only accept full 32-bit writes.
@@ -984,10 +1052,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
- dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
- mrq->data->blksz);
- mrq->cmd->error = -EINVAL;
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
@@ -1012,10 +1078,15 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
int ret;
+ if (host->plat->ios_handler &&
+ host->plat->ios_handler(mmc_dev(mmc), ios))
+ dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
+
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (host->vcc)
@@ -1035,19 +1106,35 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
}
- if (host->plat->vdd_handler)
- pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
- ios->power_mode);
- /* The ST version does not have this, fall through to POWER_ON */
- if (host->hw_designer != AMBA_VENDOR_ST) {
- pwr |= MCI_PWR_UP;
- break;
- }
+ /*
+		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
+ * and instead uses MCI_PWR_ON so apply whatever value is
+ * configured in the variant data.
+ */
+ pwr |= variant->pwrreg_powerup;
+
+ break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
break;
}
+ if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
+ /*
+ * The ST Micro variant has some additional bits
+ * indicating signal direction for the signals in
+ * the SD/MMC bus and feedback-clock usage.
+ */
+ pwr |= host->plat->sigdir;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ pwr &= ~MCI_ST_DATA74DIREN;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ pwr &= (~MCI_ST_DATA74DIREN &
+ ~MCI_ST_DATA31DIREN &
+ ~MCI_ST_DATA2DIREN);
+ }
+
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
if (host->hw_designer != AMBA_VENDOR_ST)
pwr |= MCI_ROD;
@@ -1334,6 +1421,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
amba_set_drvdata(dev, mmc);
+ pm_runtime_enable(mmc->parent);
+ if (pm_runtime_get_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_get_sync\n");
+
dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
amba_rev(dev), (unsigned long long)dev->res.start,
@@ -1437,6 +1528,9 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
ret = mmc_suspend_host(mmc);
if (ret == 0)
writel(0, host->base + MMCIMASK0);
+
+ if (pm_runtime_put_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_put_sync\n");
}
return ret;
@@ -1450,6 +1544,9 @@ static int mmci_resume(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ if (pm_runtime_get_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_get_sync\n");
+
writel(MCI_IRQENABLE, host->base + MMCIMASK0);
ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 79e4143ab9d..1c05b191fa9 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -13,16 +13,6 @@
#define MCI_PWR_ON 0x03
#define MCI_OD (1 << 6)
#define MCI_ROD (1 << 7)
-/*
- * The ST Micro version does not have ROD and reuse the voltage registers
- * for direction settings
- */
-#define MCI_ST_DATA2DIREN (1 << 2)
-#define MCI_ST_CMDDIREN (1 << 3)
-#define MCI_ST_DATA0DIREN (1 << 4)
-#define MCI_ST_DATA31DIREN (1 << 5)
-#define MCI_ST_FBCLKEN (1 << 7)
-#define MCI_ST_DATA74DIREN (1 << 8)
#define MMCICLOCK 0x004
#define MCI_CLK_ENABLE (1 << 8)
@@ -70,6 +60,13 @@
#define MCI_ST_DPSM_RWMOD (1 << 10)
#define MCI_ST_DPSM_SDIOEN (1 << 11)
/* Control register extensions in the ST Micro Ux500 versions */
+/*
+ * DMA request control is required for writes if the
+ * transfer size is not 32-byte aligned. It is also
+ * needed if the total transfer size is 32-byte aligned
+ * but any of the sg element lengths is not 32-byte
+ * aligned.
+ */
#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
#define MCI_ST_DPSM_BUSYMODE (1 << 14)
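
As a worked illustration of the alignment rule described in the comment above, a hedged sketch of the decision a driver could make before setting MCI_ST_DPSM_DMAREQCTL on a write (the helper name is hypothetical; struct mmc_data and for_each_sg() come from linux/mmc/core.h and linux/scatterlist.h):

    static bool mmci_write_needs_dmareqctl(struct mmc_data *data)
    {
            struct scatterlist *sg;
            int i;

            if (!(data->flags & MMC_DATA_WRITE))
                    return false;

            /* total transfer size not 32-byte aligned */
            if ((data->blksz * data->blocks) & 31)
                    return true;

            /* total size aligned, but an individual sg element is not */
            for_each_sg(data->sg, sg, data->sg_len, i)
                    if (sg->length & 31)
                            return true;

            return false;
    }
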
@@ -160,7 +157,7 @@
(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
MCI_TXFIFOHALFEMPTYMASK)
-#define NR_SG 16
+#define NR_SG 128
struct clk;
struct variant_data;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8843071fe98..15658d89441 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -58,6 +58,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
#include "smsc911x.h"
#define SMSC_CHIPNAME "smsc911x"
@@ -138,6 +139,10 @@ struct smsc911x_data {
/* register access functions */
const struct smsc911x_ops *ops;
+
+ /* regulators */
+ struct regulator *regulator_vddvario;
+ struct regulator *regulator_vdd33a;
};
/* Easy access to information */
@@ -362,6 +367,81 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/* Enable resources (clocks and regulators) */
+static int smsc911x_enable_resources(struct platform_device *pdev, bool enable)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int err = 0;
+
+ /* enable/disable regulator for vddvario */
+ if (pdata->regulator_vddvario) {
+ if (enable) {
+ err = regulator_enable(pdata->regulator_vddvario);
+ if (err < 0) {
+ netdev_err(ndev, "%s: regulator_enable failed '%s'\n",
+ __func__, "vddvario");
+ }
+ } else
+ err = regulator_disable(pdata->regulator_vddvario);
+ }
+
+ /* enable/disable regulator for vdd33a */
+ if (pdata->regulator_vdd33a) {
+ if (enable) {
+ err = regulator_enable(pdata->regulator_vdd33a);
+ if (err < 0) {
+ netdev_err(ndev, "%s: regulator_enable failed '%s'\n",
+ __func__, "vdd33a");
+ }
+ } else
+ err = regulator_disable(pdata->regulator_vdd33a);
+ }
+ return err;
+}
+
+
+/* Request resources (clocks and regulators) */
+static int smsc911x_request_resources(struct platform_device *pdev,
+ bool request)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int err = 0;
+
+ /* Request regulator for vddvario */
+ if (request && !pdata->regulator_vddvario) {
+ pdata->regulator_vddvario = regulator_get(&pdev->dev,
+ "vddvario");
+ if (IS_ERR(pdata->regulator_vddvario)) {
+ netdev_warn(ndev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, "vddvario");
+ pdata->regulator_vddvario = NULL;
+ }
+ } else if (!request && pdata->regulator_vddvario) {
+ regulator_put(pdata->regulator_vddvario);
+ pdata->regulator_vddvario = NULL;
+ }
+
+ /* Request regulator for vdd33a */
+ if (request && !pdata->regulator_vdd33a) {
+ pdata->regulator_vdd33a = regulator_get(&pdev->dev,
+ "vdd33a");
+ if (IS_ERR(pdata->regulator_vdd33a)) {
+ netdev_warn(ndev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, "vdd33a");
+ pdata->regulator_vdd33a = NULL;
+ }
+ } else if (!request && pdata->regulator_vdd33a) {
+ regulator_put(pdata->regulator_vdd33a);
+ pdata->regulator_vdd33a = NULL;
+ }
+
+ return err;
+}
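
These helpers look the supplies up by the consumer names "vddvario" and "vdd33a"; if regulator_get() fails, the driver only warns and runs without regulator control. A hedged sketch of the board-side mapping they rely on (the device name "smsc911x.0" is an assumption):

    static struct regulator_consumer_supply smsc911x_supplies[] = {
            REGULATOR_SUPPLY("vddvario", "smsc911x.0"), /* variable I/O supply */
            REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),   /* 3.3 V analog supply */
    };
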
+
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
* and smsc911x_mac_write, so assumes mac_lock is held */
static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -2065,6 +2145,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
struct net_device *dev;
struct smsc911x_data *pdata;
struct resource *res;
+ int retval;
dev = platform_get_drvdata(pdev);
BUG_ON(!dev);
@@ -2092,6 +2173,12 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
iounmap(pdata->ioaddr);
+ if (smsc911x_enable_resources(pdev, false))
+ pr_warn("Could not disable resource\n");
+
+ retval = smsc911x_request_resources(pdev, false);
+ /* ignored; not all boards have regulators */
+
free_netdev(dev);
return 0;
@@ -2174,6 +2261,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
unsigned int intcfg = 0;
int res_size, irq_flags;
int retval;
+ int to = 100;
pr_info("Driver version %s\n", SMSC_DRV_VERSION);
@@ -2218,6 +2306,17 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1);
+ platform_set_drvdata(pdev, dev);
+
+ retval = smsc911x_request_resources(pdev, true);
+ /* ignored; not all boards have regulators */
+
+ retval = smsc911x_enable_resources(pdev, true);
+ if (retval) {
+ pr_warn("Could not enable resource\n");
+ goto out_0;
+ }
+
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
retval = -ENOMEM;
@@ -2242,6 +2341,18 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (pdata->config.shift)
pdata->ops = &shifted_smsc911x_ops;
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms
+ */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ if (to == 0) {
+ pr_err("Device not READY in 100ms, aborting\n");
+ retval = -ENODEV;
+ goto out_0;
+ }
+
+
retval = smsc911x_init(dev);
if (retval < 0)
goto out_unmap_io_3;
@@ -2334,6 +2445,7 @@ out_0:
return retval;
}
+
#ifdef CONFIG_PM
/* This implementation assumes the devices remains powered on its VDDVARIO
* pins during suspend. */
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 9f88641e67f..2e32409a342 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -236,6 +236,32 @@ config CHARGER_GPIO
This driver can be build as a module. If so, the module will be
called gpio-charger.
+config AB8500_BM
+ bool "AB8500 Battery Management Driver"
+ depends on AB8500_CORE && AB8500_GPADC && ARCH_U8500
+ help
+ Say Y to include support for AB8500 battery management.
+
+config AB8500_BATTERY_THERM_ON_BATCTRL
+ bool "Thermistor connected on BATCTRL ADC"
+ depends on AB8500_BM
+ help
+ Say Y to enable battery temperature measurements using
+ thermistor connected on BATCTRL ADC.
+
+config AB5500_BM
+ bool "AB5500 Battery Management Driver"
+ depends on AB5500_CORE && AB5500_GPADC && MACH_U5500
+ help
+ Say Y to include support for AB5500 battery management.
+
+config AB5500_BATTERY_THERM_ON_BATCTRL
+ bool "Thermistor connected on BATCTRL ADC"
+ depends on AB5500_BM
+ help
+ Say Y to enable battery temperature measurements using
+ thermistor connected on BATCTRL ADC.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
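
With both battery-management drivers and their thermistor options selected, the resulting configuration fragment would contain lines like the following sketch (each THERM_ON_BATCTRL option is only offered when its parent BM option is enabled):

    CONFIG_AB8500_BM=y
    CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y
    CONFIG_AB5500_BM=y
    CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL=y
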
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b4af13dd8b6..6585c0aee99 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -36,5 +36,7 @@ obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_AB8500_BM) += ab8500_charger.o ab8500_btemp.o ab8500_fg.o ab8500_chargalg.o
+obj-$(CONFIG_AB5500_BM) += ab5500_charger.o abx500_chargalg.o ab5500_fg.o ab5500_btemp.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/ab5500_btemp.c b/drivers/power/ab5500_btemp.c
new file mode 100644
index 00000000000..7867455f493
--- /dev/null
+++ b/drivers/power/ab5500_btemp.c
@@ -0,0 +1,889 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Battery temperature driver for ab5500
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+
+#define BTEMP_THERMAL_LOW_LIMIT -10
+#define BTEMP_THERMAL_MED_LIMIT 0
+#define BTEMP_THERMAL_HIGH_LIMIT_62 62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA 7
+#define BTEMP_BATCTRL_CURR_SRC_15UA 15
+#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+
+#define UART_MODE 0x0F
+#define BAT_CUR_SRC 0x1F
+#define RESIS_ID_MODE 0x03
+#define RESET 0x00
+#define ADOUT_10K_PULL_UP 0x07
+
+#define to_ab5500_btemp_device_info(x) container_of((x), \
+ struct ab5500_btemp, btemp_psy)
+
+/**
+ * struct ab5500_btemp_interrupts - ab5500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_btemp_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab5500_btemp_events {
+ bool batt_rem;
+ bool usb_conn;
+};
+
+/**
+ * struct ab5500_btemp - ab5500 BTEMP device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the AB5500
+ * @curr_source: What current source we use, in uA
+ * @bat_temp: Battery temperature in degrees Celsius
+ * @prev_bat_temp: Last dispatched battery temperature
+ * @node: struct of type list_head
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @gpadc_auto: Pointer to the struct adc_auto_input
+ * @pdata: Pointer to the ab5500_btemp platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @btemp_psy: Structure for BTEMP specific battery properties
+ * @events: Structure for information about events triggered
+ * @btemp_wq: Work queue for measuring the temperature periodically
+ * @btemp_periodic_work: Work for measuring the temperature periodically
+ */
+struct ab5500_btemp {
+ struct device *dev;
+ u8 chip_id;
+ int curr_source;
+ int bat_temp;
+ int prev_bat_temp;
+ struct list_head node;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_btemp_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply btemp_psy;
+ struct ab5500_btemp_events events;
+ struct workqueue_struct *btemp_wq;
+ struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab5500_btemp_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab5500_btemp_list);
+
+struct ab5500_btemp *ab5500_btemp_get(void)
+{
+ struct ab5500_btemp *di;
+ di = list_first_entry(&ab5500_btemp_list, struct ab5500_btemp, node);
+
+ return di;
+}
+
+/**
+ * ab5500_btemp_get_batctrl_temp() - get the temperature
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *di)
+{
+ return di->bat_temp * 1000;
+}
+
+/**
+ * ab5500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
+ * @di: pointer to the ab5500_btemp structure
+ * @v_batctrl: measured batctrl voltage
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL voltage.
+ * Returns value in Ohms.
+ */
+static int ab5500_btemp_batctrl_volt_to_res(struct ab5500_btemp *di,
+ int v_batctrl)
+{
+ int rbs;
+
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+ /*
+ * If the battery has internal NTC, we use the current
+ * source to calculate the resistance, 7uA or 20uA
+ */
+ rbs = v_batctrl * 1000 / di->curr_source;
+ } else {
+ /*
+ * BAT_CTRL is internally
+ * connected to 1.8V through a 10k resistor
+ */
+ rbs = (10000 * (v_batctrl)) / (1800 - v_batctrl);
+ }
+ return rbs;
+}
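
A quick numeric check of the two conversions above, with illustrative values only:

    /*
     * With the 20 uA current source and v_batctrl = 300 mV:
     *   rbs = 300 * 1000 / 20 = 15000 Ohm
     * With the internal 10k pull-up to 1.8 V and v_batctrl = 600 mV:
     *   rbs = (10000 * 600) / (1800 - 600) = 5000 Ohm
     */
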
+
+/**
+ * ab5500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab5500_btemp_read_batctrl_voltage(struct ab5500_btemp *di)
+{
+ int vbtemp;
+ static int prev;
+
+ vbtemp = ab5500_gpadc_convert(di->gpadc, BAT_CTRL);
+ if (vbtemp < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value",
+ __func__);
+ return prev;
+ }
+ prev = vbtemp;
+ return vbtemp;
+}
+
+/**
+ * ab5500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di: pointer to the ab5500_btemp structure
+ * @enable: enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab5500_btemp_curr_source_enable(struct ab5500_btemp *di,
+ bool enable)
+{
+ int ret = 0;
+
+ /* Only do this for batteries with internal NTC */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+
+ dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, RESIS_ID_MODE);
+ if (ret) {
+ dev_err(di->dev,
+ "%s failed setting resistance identification mode\n",
+ __func__);
+ return ret;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, BAT_CTRL_15U_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+ } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+ dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ }
+ }
+ return ret;
+disable_curr_source:
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ }
+ return ret;
+}
+
+/**
+ * ab5500_btemp_get_batctrl_res() - get battery resistance
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab5500_btemp_get_batctrl_res(struct ab5500_btemp *di)
+{
+ int ret;
+ int batctrl;
+ int res;
+
+ ret = ab5500_btemp_curr_source_enable(di, true);
+ /* TODO: This delay has to be optimised */
+ mdelay(1000);
+ if (ret) {
+ dev_err(di->dev, "%s curr source enable failed\n", __func__);
+ return ret;
+ }
+
+ batctrl = ab5500_btemp_read_batctrl_voltage(di);
+ res = ab5500_btemp_batctrl_volt_to_res(di, batctrl);
+
+ ret = ab5500_btemp_curr_source_enable(di, false);
+ if (ret) {
+ dev_err(di->dev, "%s curr source disable failed\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(di->dev, "%s batctrl: %d res: %d ",
+ __func__, batctrl, res);
+
+ return res;
+}
+
+/**
+ * ab5500_btemp_res_to_temp() - resistance to temperature
+ * @di: pointer to the ab5500_btemp structure
+ * @tbl: pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @res: resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab5500_btemp_res_to_temp(struct ab5500_btemp *di,
+ const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+{
+ int i, temp;
+ /*
+ * Calculate the formula for the straight line
+ * Simple interpolation if we are within
+ * the resistance table limits, extrapolate
+ * if resistance is outside the limits.
+ */
+ if (res > tbl[0].resist)
+ i = 0;
+ else if (res <= tbl[tbl_size - 1].resist)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(res <= tbl[i].resist &&
+ res > tbl[i + 1].resist))
+ i++;
+ }
+
+ temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+ return temp;
+}
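
To make the interpolation concrete, a hedged example with a hypothetical two-entry table (entries ordered by falling resistance, as the lookup assumes):

    /*
     * tbl = { { .resist = 27000, .temp = 5 }, { .resist = 22000, .temp = 15 } }
     * res = 24500 Ohm  ->  i = 0
     * temp = 5 + ((15 - 5) * (24500 - 27000)) / (22000 - 27000)
     *      = 5 + (10 * -2500) / -5000
     *      = 10 degrees Celsius
     */
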
+
+/**
+ * ab5500_btemp_measure_temp() - measure battery temperature
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab5500_btemp_measure_temp(struct ab5500_btemp *di)
+{
+ int temp, ret;
+ static int prev;
+ int rbat, vntc;
+ int rntc = 0;
+ u8 id;
+
+ id = di->bat->batt_id;
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ id != BATTERY_UNKNOWN) {
+ rbat = ab5500_btemp_get_batctrl_res(di);
+ if (rbat < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n",
+ __func__);
+ /*
+ * Return out-of-range temperature so that
+ * charging is stopped
+ */
+ return BTEMP_THERMAL_LOW_LIMIT;
+ }
+
+ temp = ab5500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ } else {
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, ADOUT_10K_PULL_UP);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable 10k pull up to Vadout\n");
+ }
+ vntc = ab5500_gpadc_convert(di->gpadc, BTEMP_BALL);
+ if (vntc < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed,"
+ " using previous value\n", __func__);
+ return prev;
+ }
+ /*
+ * The PCB NTC is sourced from 2.75v via a 10kOhm
+ * resistor.
+ */
+ rntc = 10000 * vntc / (27500 - vntc);
+
+ temp = ab5500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+ prev = temp;
+ }
+ dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+ return temp;
+}
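
The rntc expression above is the voltage-divider relation solved for the NTC resistance; a short derivation, assuming vntc and the 27500 constant are expressed in the same units as the value returned by ab5500_gpadc_convert():

    /*
     * vntc = vsrc * rntc / (rntc + 10000)
     *   =>  rntc = 10000 * vntc / (vsrc - vntc)
     * with vsrc = 2.75 V written as 27500 in those units, giving
     * rntc = 10000 * vntc / (27500 - vntc)
     */
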
+
+/**
+ * ab5500_btemp_id() - Identify the connected battery
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function will try to identify the battery by reading the ID
+ * resistor. Some brands use a combined ID resistor with a NTC resistor to
+ * both be able to identify and to read the temperature of it.
+ */
+static int ab5500_btemp_id(struct ab5500_btemp *di)
+{
+ int res;
+ u8 i;
+
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->bat->batt_id = BATTERY_UNKNOWN;
+
+ res = ab5500_btemp_get_batctrl_res(di);
+ if (res < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+ return -ENXIO;
+ }
+
+ /* BATTERY_UNKNOWN is defined on position 0, skip it! */
+ for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+ if ((res <= di->bat->bat_type[i].resis_high) &&
+ (res >= di->bat->bat_type[i].resis_low)) {
+ dev_dbg(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d"
+ " index: %d\n",
+ di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bat->bat_type[i].resis_low, res,
+ di->bat->bat_type[i].resis_high, i);
+
+ di->bat->batt_id = i;
+ break;
+ }
+ }
+
+ if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ dev_warn(di->dev, "Battery identified as unknown"
+ ", resistance %d Ohm\n", res);
+ return -ENXIO;
+ }
+
+ /*
+ * We only have to change current source if the
+ * detected type is Type 1, else we use the 7uA source
+ */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ di->bat->batt_id == 1) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 15uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_15UA;
+ }
+
+ return di->bat->batt_id;
+}
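
As an illustration of the matching above, a hedged example of how a battery-type resistance window relates to the measured ID resistance (the field values are hypothetical):

    /*
     * With bat_type[1] = { .resis_low = 13500, .resis_high = 16500, ... },
     * a measured batctrl resistance of 15000 Ohm satisfies
     * resis_low <= res <= resis_high, so batt_id becomes 1 and, because
     * adc_therm == ABx500_ADC_THERM_BATCTRL, the 15 uA source is selected.
     */
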
+
+/**
+ * ab5500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab5500_btemp_periodic_work(struct work_struct *work)
+{
+ struct ab5500_btemp *di = container_of(work,
+ struct ab5500_btemp, btemp_periodic_work.work);
+
+ di->bat_temp = ab5500_btemp_measure_temp(di);
+
+ if (di->bat_temp != di->prev_bat_temp) {
+ di->prev_bat_temp = di->bat_temp;
+ power_supply_changed(&di->btemp_psy);
+ }
+ di->bat->temp_now = di->bat_temp;
+
+ /* Schedule a new measurement */
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(20 * HZ));
+}
+
+/**
+ * ab5500_btemp_batt_removal_handler() - battery removal detected
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of the ab5500_btemp structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_btemp_batt_removal_handler(int irq, void *_di)
+{
+ struct ab5500_btemp *di = _di;
+ dev_err(di->dev, "Battery removal detected!\n");
+
+ di->events.batt_rem = true;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_btemp_batt_attach_handler() - battery insertion detected
+ * @irq: interrupt number
+ * @_di: void pointer that holds the address of the ab5500_btemp structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_btemp_batt_attach_handler(int irq, void *_di)
+{
+ struct ab5500_btemp *di = _di;
+ dev_err(di->dev, "Battery attached!\n");
+
+ di->events.batt_rem = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_btemp_periodic() - Periodic temperature measurements
+ * @di: pointer to the ab5500_btemp structure
+ * @enable: enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab5500_btemp_periodic(struct ab5500_btemp *di,
+ bool enable)
+{
+ dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+ enable);
+
+ if (enable)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+ else
+ cancel_delayed_work_sync(&di->btemp_periodic_work);
+}
+
+/**
+ * ab5500_btemp_get_property() - get the btemp properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online: presence of the battery
+ * present: presence of the battery
+ * technology: battery technology
+ * temp: battery temperature
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_btemp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_btemp *di;
+
+ di = to_ab5500_btemp_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (di->events.batt_rem)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ if (di->bat->batt_id == BATTERY_UNKNOWN)
+ /*
+ * In case the battery is not identified, it is assumed that
+ * we are running from a power supply and, since no monitoring
+ * is done in that case, a nominal temperature is hardcoded.
+ */
+ val->intval = 250;
+ else
+ val->intval = di->bat_temp * 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab5500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab5500_btemp *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab5500_btemp_device_info(psy);
+
+ /*
+ * For all psy where the name of your driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval && di->events.usb_conn) {
+ di->events.usb_conn = false;
+ ab5500_btemp_periodic(di,
+ false);
+ }
+ /* USB connected */
+ else if (ret.intval && !di->events.usb_conn) {
+ di->events.usb_conn = true;
+ ab5500_btemp_periodic(di, true);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab5500_btemp_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is pointing to the function pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab5500_btemp_external_power_changed(struct power_supply *psy)
+{
+ struct ab5500_btemp *di = to_ab5500_btemp_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->btemp_psy, ab5500_btemp_get_ext_psy_data);
+}
+
+/* ab5500 btemp driver interrupts and their respective isr */
+static struct ab5500_btemp_interrupts ab5500_btemp_irq[] = {
+ {"BATT_REMOVAL", ab5500_btemp_batt_removal_handler},
+ {"BATT_ATTACH", ab5500_btemp_batt_attach_handler},
+};
+static int ab5500_btemp_bat_temp_trig(int mux)
+{
+ struct ab5500_btemp *di = ab5500_btemp_get();
+
+ if (di->bat_temp < BTEMP_THERMAL_LOW_LIMIT) {
+ dev_err(di->dev,
+ "battery temp less than lower threshold (-10 deg cel)\n");
+ power_supply_changed(&di->btemp_psy);
+ } else if (di->bat_temp > BTEMP_THERMAL_HIGH_LIMIT_62) {
+ dev_err(di->dev, "battery temp greater than max threshold\n");
+ power_supply_changed(&di->btemp_psy);
+ }
+ return 0;
+}
+
+static int ab5500_btemp_auto_temp(struct ab5500_btemp *di)
+{
+ struct adc_auto_input *auto_ip;
+ int ret = 0;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(di->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = BTEMP_BALL;
+ auto_ip->freq = MS500;
+ auto_ip->min = BTEMP_THERMAL_LOW_LIMIT;
+ auto_ip->max = BTEMP_THERMAL_HIGH_LIMIT_62;
+ auto_ip->auto_adc_callback = ab5500_btemp_bat_temp_trig;
+ di->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto);
+ if (ret)
+ dev_err(di->dev,
+ "failed to set auto trigger for battery temp\n");
+ return ret;
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_btemp_resume(struct platform_device *pdev)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+
+ if (di->events.usb_conn)
+ ab5500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static int ab5500_btemp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+
+ if (di->events.usb_conn)
+ ab5500_btemp_periodic(di, false);
+
+ return 0;
+}
+#else
+#define ab5500_btemp_suspend NULL
+#define ab5500_btemp_resume NULL
+#endif
+
+static int __devexit ab5500_btemp_remove(struct platform_device *pdev)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->btemp_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di->gpadc_auto);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_btemp_probe(struct platform_device *pdev)
+{
+ int irq, i, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab5500_btemp *di =
+ kzalloc(sizeof(struct ab5500_btemp), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->btemp;
+ di->bat = plat_data->battery;
+
+ /* get btemp specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no btemp platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* BTEMP supply */
+ di->btemp_psy.name = "ab5500_btemp";
+ di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->btemp_psy.properties = ab5500_btemp_props;
+ di->btemp_psy.num_properties = ARRAY_SIZE(ab5500_btemp_props);
+ di->btemp_psy.get_property = ab5500_btemp_get_property;
+ di->btemp_psy.supplied_to = di->pdata->supplied_to;
+ di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.external_power_changed =
+ ab5500_btemp_external_power_changed;
+
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ create_singlethread_workqueue("ab5500_btemp_wq");
+ if (di->btemp_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ goto free_device_info;
+ }
+
+ /* Init work for measuring temperature periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ ab5500_btemp_periodic_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_btemp_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "ab5500 CID is: 0x%02x\n",
+ di->chip_id);
+
+ /* Identify the battery */
+ if (ab5500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+
+ /* Measure temperature once initially */
+ di->bat_temp = ab5500_btemp_measure_temp(di);
+ di->bat->temp_now = di->bat_temp;
+
+ /* Register BTEMP power supply class */
+ ret = power_supply_register(di->dev, &di->btemp_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register BTEMP psy\n");
+ goto free_btemp_wq;
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_btemp_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab5500_btemp_irq[i].name, di);
+
+ if (ret) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_btemp_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_btemp_irq[i].name, irq, ret);
+ }
+ ret = ab5500_btemp_auto_temp(di);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to register auto trigger for battery temp\n");
+ goto free_irq;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->btemp_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+free_btemp_wq:
+ destroy_workqueue(di->btemp_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_btemp_driver = {
+ .probe = ab5500_btemp_probe,
+ .remove = __devexit_p(ab5500_btemp_remove),
+ .suspend = ab5500_btemp_suspend,
+ .resume = ab5500_btemp_resume,
+ .driver = {
+ .name = "ab5500-btemp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_btemp_init(void)
+{
+ return platform_driver_register(&ab5500_btemp_driver);
+}
+
+static void __exit ab5500_btemp_exit(void)
+{
+ platform_driver_unregister(&ab5500_btemp_driver);
+}
+
+subsys_initcall_sync(ab5500_btemp_init);
+module_exit(ab5500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-btemp");
+MODULE_DESCRIPTION("AB5500 battery temperature driver");
diff --git a/drivers/power/ab5500_charger.c b/drivers/power/ab5500_charger.c
new file mode 100644
index 00000000000..4ac35fcb1e8
--- /dev/null
+++ b/drivers/power/ab5500_charger.c
@@ -0,0 +1,1814 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Charger driver for AB5500
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+
+/* Charger constants */
+#define NO_PW_CONN 0
+#define USB_PW_CONN 2
+
+/* HW failure constants */
+#define VBUS_CH_NOK 0x0A
+#define VBUS_OVV_TH 0x06
+
+/* AB5500 Charger constants */
+#define AB5500_USB_LINK_STATUS 0x78
+#define CHARGER_REV_SUP 0x10
+#define SW_EOC 0x40
+#define USB_CHAR_DET 0x02
+#define VBUS_RISING 0x20
+#define VBUS_FALLING 0x40
+#define USB_LINK_UPDATE 0x02
+#define USB_CH_TH_PROT_LOW 0x02
+#define USB_CH_TH_PROT_HIGH 0x01
+#define USB_ID_HOST_DET_ENA_MASK 0x02
+#define USB_ID_HOST_DET_ENA 0x02
+#define USB_ID_DEVICE_DET_ENA_MASK 0x01
+#define USB_ID_DEVICE_DET_ENA 0x01
+#define CHARGER_ISET_IN_1_1A 0x0C
+#define LED_ENABLE 0x01
+#define RESET 0x00
+#define SSW_ENABLE_REBOOT 0x80
+#define SSW_REBOOT_EN 0x40
+#define SSW_CONTROL_AUTOC 0x04
+#define SSW_PSEL_480S 0x00
+
+/* UsbLineStatus register - usb types */
+enum ab5500_charger_link_status {
+ USB_STAT_NOT_CONFIGURED,
+ USB_STAT_STD_HOST_NC,
+ USB_STAT_STD_HOST_C_NS,
+ USB_STAT_STD_HOST_C_S,
+ USB_STAT_HOST_CHG_NM,
+ USB_STAT_HOST_CHG_HS,
+ USB_STAT_HOST_CHG_HS_CHIRP,
+ USB_STAT_DEDICATED_CHG,
+ USB_STAT_ACA_RID_A,
+ USB_STAT_ACA_RID_B,
+ USB_STAT_ACA_RID_C_NM,
+ USB_STAT_ACA_RID_C_HS,
+ USB_STAT_ACA_RID_C_HS_CHIRP,
+ USB_STAT_HM_IDGND,
+ USB_STAT_RESERVED,
+ USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab5500_usb_state {
+ AB5500_BM_USB_STATE_RESET_HS, /* HighSpeed Reset */
+ AB5500_BM_USB_STATE_RESET_FS, /* FullSpeed/LowSpeed Reset */
+ AB5500_BM_USB_STATE_CONFIGURED,
+ AB5500_BM_USB_STATE_SUSPEND,
+ AB5500_BM_USB_STATE_RESUME,
+ AB5500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB5500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05 50
+#define USB_CH_IP_CUR_LVL_0P09 98
+#define USB_CH_IP_CUR_LVL_0P19 193
+#define USB_CH_IP_CUR_LVL_0P29 290
+#define USB_CH_IP_CUR_LVL_0P38 380
+#define USB_CH_IP_CUR_LVL_0P45 450
+#define USB_CH_IP_CUR_LVL_0P5 500
+#define USB_CH_IP_CUR_LVL_0P6 600
+#define USB_CH_IP_CUR_LVL_0P7 700
+#define USB_CH_IP_CUR_LVL_0P8 800
+#define USB_CH_IP_CUR_LVL_0P9 900
+#define USB_CH_IP_CUR_LVL_1P0 1000
+#define USB_CH_IP_CUR_LVL_1P1 1100
+#define USB_CH_IP_CUR_LVL_1P3 1300
+#define USB_CH_IP_CUR_LVL_1P4 1400
+#define USB_CH_IP_CUR_LVL_1P5 1500
+
+#define to_ab5500_charger_usb_device_info(x) container_of((x), \
+ struct ab5500_charger, usb_chg)
+
+/**
+ * struct ab5500_charger_interrupts - ab5500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_charger_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab5500_charger_info {
+ int charger_connected;
+ int charger_online;
+ int charger_voltage;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct ab5500_charger_event_flags {
+ bool usb_thermal_prot;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool vbus_collapse;
+};
+
+struct ab5500_charger_usb_state {
+ bool usb_changed;
+ int usb_current;
+ enum ab5500_usb_state state;
+ spinlock_t usb_lock;
+};
+
+/**
+ * struct ab5500_charger - ab5500 Charger device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the ab5500
+ * @max_usb_in_curr: Max USB charger input current
+ * @vbus_detected: VBUS detected
+ * @vbus_detected_start:
+ * VBUS detected during startup
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab5500_charger platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @flags: Structure for information about events triggered
+ * @usb_state: Structure for usb stack information
+ * @usb_chg: USB charger power supply
+ * @ac: Structure that holds the AC charger properties
+ * @usb: Structure that holds the USB charger properties
+ * @charger_wq: Work queue for the IRQs and checking HW state
+ * @check_hw_failure_work: Work for checking HW state
+ * @check_usbchgnotok_work: Work for checking USB charger not ok status
+ * @ac_work: Work for checking AC charger connection
+ * @detect_usb_type_work: Work for detecting the USB type connected
+ * @usb_link_status_work: Work for checking the new USB link status
+ * @usb_state_changed_work: Work for checking USB state
+ * @check_main_thermal_prot_work:
+ * Work for checking Main thermal status
+ * @check_usb_thermal_prot_work:
+ * Work for checking USB thermal status
+ * @otg: pointer to struct otg_transceiver, used to
+ * notify the current during a standard host
+ * charger.
+ * @nb: structure of type notifier_block, which has
+ * a function pointer referenced by the usb driver.
+ */
+struct ab5500_charger {
+ struct device *dev;
+ u8 chip_id;
+ int max_usb_in_curr;
+ bool vbus_detected;
+ bool vbus_detected_start;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct abx500_charger_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct ab5500_charger_event_flags flags;
+ struct ab5500_charger_usb_state usb_state;
+ struct ux500_charger usb_chg;
+ struct ab5500_charger_info usb;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_hw_failure_work;
+ struct delayed_work check_usbchgnotok_work;
+ struct work_struct detect_usb_type_work;
+ struct work_struct usb_link_status_work;
+ struct work_struct usb_state_changed_work;
+ struct work_struct check_usb_thermal_prot_work;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+};
+
+/* USB properties */
+static enum power_supply_property ab5500_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab5500_charger_get_vbus_voltage() - get vbus voltage
+ * @di: pointer to the ab5500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab5500_charger_get_vbus_voltage(struct ab5500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->usb.charger_connected) {
+ vch = ab5500_gpadc_convert(di->gpadc, VBUS_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab5500_charger_get_usb_current() - get usb charger current
+ * @di: pointer to the ab5500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab5500_charger_get_usb_current(struct ab5500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->usb.charger_online) {
+ ich = ab5500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab5500_charger_detect_chargers() - Detect the connected chargers
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB it will not mean we can actually charge from it
+ * but that there is a USB cable connected that we have to
+ * identify. This is used during startup when we don't get
+ * interrupts of the charger detection
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN no power supply is connected
+ * USB_PW_CONN if the USB power supply is connected
+ */
+static int ab5500_charger_detect_chargers(struct ab5500_charger *di)
+{
+ int result = NO_PW_CONN;
+ int ret;
+ u8 val;
+ /* Check for USB charger */
+ /*
+ * TODO: Since there is no status register, validate by
+ * reading the IT source registers
+ */
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_IT,
+ AB5500_IT_SOURCE8, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & VBUS_RISING)
+ result |= USB_PW_CONN;
+ else if (val & VBUS_FALLING)
+ result = NO_PW_CONN;
+
+ return result;
+}
+
+/**
+ * ab5500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di: pointer to the ab5500_charger structure
+ * @link_status: the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_charger_max_usb_curr(struct ab5500_charger *di,
+ enum ab5500_charger_link_status link_status)
+{
+ int ret = 0;
+
+ switch (link_status) {
+ case USB_STAT_STD_HOST_NC:
+ case USB_STAT_STD_HOST_C_NS:
+ case USB_STAT_STD_HOST_C_S:
+ dev_dbg(di->dev, "USB Type - Standard host is "
+ "detected through USB driver\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case USB_STAT_HOST_CHG_HS_CHIRP:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ case USB_STAT_HOST_CHG_HS:
+ case USB_STAT_ACA_RID_C_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ break;
+ case USB_STAT_ACA_RID_A:
+ /*
+ * Dedicated charger level minus maximum current accessory
+ * can consume (300mA). Closest level is 1100mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ break;
+ case USB_STAT_ACA_RID_B:
+ /*
+ * Dedicated charger level minus 120mA (20mA for ACA and
+ * 100mA for potential accessory). Closest level is 1300mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ case USB_STAT_HOST_CHG_NM:
+ case USB_STAT_ACA_RID_C_HS_CHIRP:
+ case USB_STAT_ACA_RID_C_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ case USB_STAT_RESERVED:
+ /*
+ * This state is used to indicate that VBUS has dropped below
+ * the detection level 4 times in a row. This is due to the
+ * charger output current being set too high, making the
+ * charger voltage collapse. This has to be propagated to
+ * chargalg. This is done using the property
+ * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+ */
+ di->flags.vbus_collapse = true;
+ dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -1;
+ break;
+ case USB_STAT_HM_IDGND:
+ case USB_STAT_NOT_CONFIGURED:
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ default:
+ dev_err(di->dev, "USB Type - Unknown\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ }
+
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
+ link_status, di->max_usb_in_curr);
+
+ return ret;
+}
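
A few concrete mappings implied by the switch above, using the USB_CH_IP_CUR_LVL_* defines earlier in this file:

    /*
     * USB_STAT_STD_HOST_C_S      -> max_usb_in_curr =   98 mA (0P09)
     * USB_STAT_HOST_CHG_HS_CHIRP -> max_usb_in_curr =  500 mA (0P5)
     * USB_STAT_ACA_RID_B         -> max_usb_in_curr = 1300 mA (1P3)
     * USB_STAT_DEDICATED_CHG     -> max_usb_in_curr = 1500 mA (1P5)
     * USB_STAT_HM_IDGND          -> charging not allowed, returns -ENXIO
     */
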
+
+/**
+ * ab5500_charger_read_usb_type() - read the type of usb connected
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_charger_read_usb_type(struct ab5500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_USB,
+ AB5500_USB_LINE_STATUS, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return ret;
+ }
+
+ /* get the USB type */
+ val = (val & AB5500_USB_LINK_STATUS) >> 3;
+ ret = ab5500_charger_max_usb_curr(di,
+ (enum ab5500_charger_link_status) val);
+
+ return ret;
+}
+
+static int ab5500_charger_voltage_map[] = {
+ 3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+ 3700, 3725, 3750, 3775, 3800, 3825, 3850, 3875,
+ 3900, 3925, 3950, 3975, 4000, 4025, 4050, 4060,
+ 4070, 4080, 4090, 4100, 4110, 4120, 4130, 4140,
+ 4150, 4160, 4170, 4180, 4190, 4200, 4210, 4220,
+ 4230, 4240, 4250, 4260, 4270, 4280, 4290, 4300,
+ 4310, 4320, 4330, 4340, 4350, 4360, 4370, 4380,
+ 4390, 4400, 4410, 4420, 4430, 4440, 4450, 4460,
+ 4470, 4480, 4490, 4500, 4510, 4520, 4530, 4540,
+ 4550, 4560, 4570, 4580, 4590, 4600,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the ab5500
+ * Values taken from the AB5500 product specification manual
+ */
+static int ab5500_charger_current_map[] = {
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1500,
+ 1500,
+};
+
+static int ab5500_icsr_current_map[] = {
+ 50,
+ 93,
+ 193,
+ 290,
+ 380,
+ 450,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1300,
+ 1400,
+ 1500,
+};
+
+static int ab5500_cvrec_voltage_map[] = {
+ 3300,
+ 3325,
+ 3350,
+ 3375,
+ 3400,
+ 3425,
+ 3450,
+ 3475,
+ 3500,
+ 3525,
+ 3550,
+ 3575,
+ 3600,
+ 3625,
+ 3650,
+ 3675,
+ 3700,
+ 3725,
+ 3750,
+ 3775,
+ 3800,
+ 3825,
+ 3850,
+ 3875,
+ 3900,
+ 3925,
+ 4000,
+ 4025,
+ 4050,
+ 4075,
+ 4100,
+ 4125,
+ 4150,
+ 4175,
+ 4200,
+ 4225,
+ 4250,
+ 4275,
+ 4300,
+ 4325,
+ 4350,
+ 4375,
+ 4400,
+ 4425,
+ 4450,
+ 4475,
+ 4500,
+ 4525,
+ 4550,
+ 4575,
+ 4600,
+};
+
+static int ab5500_cvrec_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.3V */
+ if (voltage < ab5500_cvrec_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(ab5500_cvrec_voltage_map); i++) {
+ if (voltage < ab5500_cvrec_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_cvrec_voltage_map) - 1;
+ if (voltage == ab5500_cvrec_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.3V */
+ if (voltage < ab5500_charger_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(ab5500_charger_voltage_map); i++) {
+ if (voltage < ab5500_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_charger_voltage_map) - 1;
+ if (voltage == ab5500_charger_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_icsr_curr_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab5500_icsr_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab5500_icsr_current_map); i++) {
+ if (curr < ab5500_icsr_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_icsr_current_map) - 1;
+ if (curr == ab5500_icsr_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab5500_charger_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_current_map); i++) {
+ if (curr < ab5500_charger_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_charger_current_map) - 1;
+ if (curr == ab5500_charger_current_map[i])
+ return i;
+ else
+ return -1;
+}
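
A quick check of the table-lookup helpers above (indices refer to the maps defined earlier; requests above the highest table entry yield -1):

    /*
     * ab5500_icsr_curr_to_regval(500) == 6   (500 mA entry)
     * ab5500_current_to_regval(1000)  == 9   (1000 mA entry)
     * ab5500_voltage_to_regval(4200)  == 37  (4200 mV entry)
     * ab5500_current_to_regval(2000)  == -1  (above the highest entry)
     */
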
+
+/**
+ * ab5500_charger_get_usb_cur() - get usb current
+ * @di: pointer to the ab5500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
+ */
+static int ab5500_charger_get_usb_cur(struct ab5500_charger *di)
+{
+ switch (di->usb_state.usb_current) {
+ case 50:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ break;
+ case 100:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case 200:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+ break;
+ case 300:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+ break;
+ case 400:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+ break;
+ case 500:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ default:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * ab5500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di: pointer to the ab5500_charger structure
+ * @ich_in: charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_set_vbus_in_curr(struct ab5500_charger *di,
+ int ich_in)
+{
+ int ret;
+ int input_curr_index;
+ int min_value;
+
+ /* We should always use the lowest current limit */
+ min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
+ input_curr_index = ab5500_icsr_curr_to_regval(min_value);
+ if (input_curr_index < 0) {
+ dev_err(di->dev, "VBUS input current limit too high\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG,
+ AB5500_ICSR, input_curr_index);
+ if (ret)
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+
+ return ret;
+}
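
A hedged worked example of the clamping above (the platform limit is a hypothetical value):

    /*
     * With di->bat->chg_params->usb_curr_max = 500 and ich_in = 900,
     * min_value = 500, ab5500_icsr_curr_to_regval(500) returns 6 and
     * 0x06 is written to AB5500_ICSR.
     */
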
+
+/**
+ * ab5500_charger_usb_en() - enable usb charging
+ * @di: pointer to the ab5500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @ich_out: charger output current
+ *
+ * Enable/Disable USB charging and turns on/off the charging led respectively.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_usb_en(struct ux500_charger *charger,
+ int enable, int vset, int ich_out)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+
+ struct ab5500_charger *di = to_ab5500_charger_usb_device_info(charger);
+
+ if (enable) {
+ /* Check if USB is connected */
+ if (!di->usb.charger_connected) {
+ dev_err(di->dev, "USB charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable USB charging */
+ dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+ volt_index = ab5500_voltage_to_regval(vset);
+ curr_index = ab5500_current_to_regval(ich_out);
+
+ /* ChVoltLevel: max voltage up to which the battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, ich_out);
+ if (ret) {
+ dev_err(di->dev, "%s setting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* ChOutputCurentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Battery voltage when charging should be resumed after
+ * completion of charging
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CVREC,
+ ab5500_cvrec_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].recharge_vol));
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Battery temperature:
+ * Input to the TBDATA register corresponds to the battery
+ * temperature(temp being multiples of 2)
+ * In order to obtain the value to be written to this reg
+ * divide the temperature obtained from gpadc by 2
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_TBDATA,
+ di->bat->temp_now / 2);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* If success power on charging LED indication */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_LEDT, LED_ENABLE);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Register DCIOCURRENT is part of the charging watchdog
+ * re-kick sequence, so it has to be written irrespective of
+ * whether USB charging is enabled.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_DCIOCURRENT,
+ RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ di->usb.charger_online = 1;
+ } else {
+ /* ChVoltLevel: max voltage up to which the battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s resetting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /* If success power off charging LED indication */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_LEDT, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ di->usb.charger_online = 0;
+ di->usb.wd_expired = false;
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+ }
+ power_supply_changed(&di->usb_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_watchdog_kick() - kick charger watchdog
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct ab5500_charger *di;
+ int volt_index, curr_index;
+ u8 value = 0;
+
+ /* TODO: update */
+ if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab5500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_STARTUP,
+ AB5500_MCB, &value);
+ if (ret)
+ dev_err(di->dev, "Failed to read!\n");
+
+ value = value | (SSW_ENABLE_REBOOT | SSW_REBOOT_EN |
+ SSW_CONTROL_AUTOC | SSW_PSEL_480S);
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_STARTUP,
+ AB5500_MCB, value);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ volt_index = ab5500_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl);
+ curr_index = ab5500_current_to_regval(di->max_usb_in_curr);
+
+ /* ChVoltLevel: max voltage up to which the battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ /* current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "%s setting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* ChOutputCurentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Battery voltage when charging should be resumed after
+ * completion of charging
+ */
+ /* Charger_Vrechar[5:0] = '4.025 V' */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CVREC,
+ ab5500_cvrec_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].recharge_vol));
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Battery temperature:
+ * Input to the TBDATA register corresponds to the battery
+ * temperature(temp being multiples of 2)
+ * In order to obtain the value to be written to this reg
+ * divide the temperature obtained from gpadc by 2
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_TBDATA,
+ di->bat->temp_now / 2);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Register DCIOCURRENT is part of the charging watchdog
+ * re-kick sequence, so it has to be written irrespective of
+ * whether USB charging is enabled.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_DCIOCURRENT,
+ RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_update_charger_current() - update charger current
+ * @charger: pointer to the ux500_charger structure
+ * @ich_out: requested charger output current
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab5500_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret = 0;
+ int curr_index;
+ struct ab5500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab5500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = ab5500_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(di->dev,
+ "Charger current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG,
+ AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_check_hw_failure_work() - check main charger failure
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab5500_charger_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_hw_failure_work.work);
+
+	/* Check if the status bits for HW failure are still active */
+ if (di->flags.vbus_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_PHY_STATUS,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & VBUS_OVV_TH)) {
+ di->flags.vbus_ovv = false;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+ /* If we still have a failure, schedule a new check */
+ if (di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab5500_charger_detect_usb_type_work() - work to detect USB type
+ * @work: Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab5500_charger_detect_usb_type_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, detect_usb_type_work);
+
+ /*
+	 * Since we can't be sure that the events are received
+	 * synchronously, we have to check if a charger is
+	 * connected by reading the status register
+ */
+ ret = ab5500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ }
+}
+
+/**
+ * ab5500_charger_usb_link_status_work() - work to detect USB type
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab5500_charger_usb_link_status_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, usb_link_status_work);
+
+ /*
+	 * Since we can't be sure that the events are received
+	 * synchronously, we have to check if a charger is
+	 * connected by reading the status register
+ */
+ ret = ab5500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ ret = ab5500_charger_read_usb_type(di);
+ if (!ret) {
+ /* Update maximum input current */
+ ret = ab5500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ } else if (ret == -ENXIO) {
+ /* No valid charger type detected */
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+}
+
+static void ab5500_charger_usb_state_changed_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, usb_state_changed_work);
+
+ if (!di->vbus_detected)
+ return;
+
+ spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+ di->usb_state.usb_changed = false;
+ spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+ /*
+ * wait for some time until you get updates from the usb stack
+ * and negotiations are completed
+ */
+ msleep(250);
+
+ if (di->usb_state.usb_changed)
+ return;
+
+ dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current);
+
+ switch (di->usb_state.state) {
+ case AB5500_BM_USB_STATE_RESET_HS:
+ case AB5500_BM_USB_STATE_RESET_FS:
+ case AB5500_BM_USB_STATE_SUSPEND:
+ case AB5500_BM_USB_STATE_MAX:
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ break;
+
+ case AB5500_BM_USB_STATE_RESUME:
+ /*
+	 * When going from suspend to resume there should be a
+	 * delay of 1 sec before enabling charging
+ */
+ msleep(1000);
+ /* Intentional fall through */
+ case AB5500_BM_USB_STATE_CONFIGURED:
+ /*
+ * USB is configured, enable charging with the charging
+ * input current obtained from USB driver
+ */
+ if (!ab5500_charger_get_usb_cur(di)) {
+ /* Update maximum input current */
+ ret = ab5500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ break;
+
+ default:
+ break;
+	}
+}
+
+/**
+ * ab5500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab5500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+ bool prev_status;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_usbchgnotok_work.work);
+
+ /* Check if the status bit for usbchargernotok is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_CHGFSM_CHARGER_DETECT, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ prev_status = di->flags.usbchargernotok;
+
+ if (reg_value & VBUS_CH_NOK) {
+ di->flags.usbchargernotok = true;
+ /* Check again in 1sec */
+ queue_delayed_work(di->charger_wq,
+ &di->check_usbchgnotok_work, HZ);
+ } else {
+ di->flags.usbchargernotok = false;
+ di->flags.vbus_collapse = false;
+ }
+
+ if (prev_status != di->flags.usbchargernotok)
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab5500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab5500_charger_check_usb_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_usb_thermal_prot_work);
+
+ /* Check if the status bit for usb_thermal_prot is still active */
+ /* TODO: Interrupt source reg 15 bit 4 */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_CHGFSM_USB_BTEMP_CURR_LIM, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & USB_CH_TH_PROT_LOW || reg_value & USB_CH_TH_PROT_HIGH)
+ di->flags.usb_thermal_prot = true;
+ else
+ di->flags.usb_thermal_prot = false;
+
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab5500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusdetf_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS falling detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusdetr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ di->vbus_detected = true;
+ dev_dbg(di->dev, "VBUS rising detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "USB link status changed\n");
+
+ queue_work(di->charger_wq, &di->usb_link_status_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "Not allowed USB charger detected\n");
+ queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_chwdexp_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "Charger watchdog expired\n");
+
+ /*
+ * The charger that was online when the watchdog expired
+ * needs to be restarted for charging to start again
+ */
+ if (di->usb.charger_online) {
+ di->usb.wd_expired = true;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusovv_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS overvoltage detected\n");
+ di->flags.vbus_ovv = true;
+ power_supply_changed(&di->usb_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usb_get_property() - get the usb properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online: usb charging is in progress or not
+ * present: presence of the usb
+ * voltage: vbus voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_charger *di;
+
+ di = to_ab5500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.usbchargernotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.usb_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (di->flags.vbus_ovv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->usb.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->usb.charger_voltage = ab5500_charger_get_vbus_voltage(di);
+ val->intval = di->usb.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab5500_charger_get_usb_current(di) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ /*
+ * This property is used to indicate when VBUS has collapsed
+ * due to too high output current from the USB charger
+ */
+ if (di->flags.vbus_collapse)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab5500_charger_init_hw_registers() - Set up charger related registers
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Set up USB charger detection, over current protection for reverse supply
+ * and SW end of charge at flat current detection
+ */
+static int ab5500_charger_init_hw_registers(struct ab5500_charger *di)
+{
+ int ret = 0;
+
+ /* Enable ID Host and Device detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
+ USB_ID_HOST_DET_ENA_MASK, USB_ID_HOST_DET_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable usb charger detection\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
+ USB_ID_DEVICE_DET_ENA_MASK, USB_ID_DEVICE_DET_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable usb charger detection\n");
+ goto out;
+ }
+
+ /* Over current protection for reverse supply */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CREVS, CHARGER_REV_SUP,
+ CHARGER_REV_SUP);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable over current protection for reverse supply\n");
+ goto out;
+ }
+
+ /* Enable SW EOC at flatcurrent detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CCTRL, SW_EOC, SW_EOC);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable end of charge at flatcurrent detection\n");
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/*
+ * ab5500 charger driver interrupts and their respective isr
+ */
+static struct ab5500_charger_interrupts ab5500_charger_irq[] = {
+ {"VBUS_FALLING", ab5500_charger_vbusdetf_handler},
+ {"VBUS_RISING", ab5500_charger_vbusdetr_handler},
+ {"USB_LINK_UPDATE", ab5500_charger_usblinkstatus_handler},
+ {"USB_CH_TH_PROTECTION", ab5500_charger_usbchthprotr_handler},
+ {"USB_CH_NOT_OK", ab5500_charger_usbchargernotokr_handler},
+ {"OVV", ab5500_charger_vbusovv_handler},
+ /* TODO: Interrupt missing, will be available in cut 2 */
+ /*{"CHG_SW_TIMER_OUT", ab5500_charger_chwdexp_handler},*/
+};
+
+static int ab5500_charger_usb_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *power)
+{
+ struct ab5500_charger *di =
+ container_of(nb, struct ab5500_charger, nb);
+ enum ab5500_usb_state bm_usb_state;
+ unsigned mA = *((unsigned *)power);
+
+	/* TODO: The state is fabricated here. See if the charger really
+	 * needs the USB state or if mA is enough
+ */
+ if ((di->usb_state.usb_current == 2) && (mA > 2))
+ bm_usb_state = AB5500_BM_USB_STATE_RESUME;
+ else if (mA == 0)
+ bm_usb_state = AB5500_BM_USB_STATE_RESET_HS;
+ else if (mA == 2)
+ bm_usb_state = AB5500_BM_USB_STATE_SUSPEND;
+ else if (mA >= 8) /* 8, 100, 500 */
+ bm_usb_state = AB5500_BM_USB_STATE_CONFIGURED;
+ else /* Should never occur */
+ bm_usb_state = AB5500_BM_USB_STATE_RESET_FS;
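+
+	/*
+	 * Illustrative walk-through of the mapping above (not part of the
+	 * original source): mA == 0 -> RESET_HS, mA == 2 -> SUSPEND,
+	 * mA >= 8 (8/100/500) -> CONFIGURED, and a transition from
+	 * usb_current == 2 to mA > 2 -> RESUME, which re-enables charging
+	 * after the 1 sec delay in ab5500_charger_usb_state_changed_work().
+	 */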
+
+ dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+ __func__, bm_usb_state, mA);
+
+ spin_lock(&di->usb_state.usb_lock);
+ di->usb_state.usb_changed = true;
+ di->usb_state.state = bm_usb_state;
+ di->usb_state.usb_current = mA;
+ spin_unlock(&di->usb_state.usb_lock);
+
+ queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_charger_resume(struct platform_device *pdev)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+
+ /* If we still have a HW failure, schedule a new check */
+ if (di->flags.usbchargernotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, 0);
+ }
+
+ return 0;
+}
+
+static int ab5500_charger_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+
+ /* Cancel any pending HW failure check */
+ if (delayed_work_pending(&di->check_hw_failure_work))
+ cancel_delayed_work(&di->check_hw_failure_work);
+
+ return 0;
+}
+#else
+#define ab5500_charger_suspend NULL
+#define ab5500_charger_resume NULL
+#endif
+
+static int __devexit ab5500_charger_remove(struct platform_device *pdev)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable USB charging */
+ ab5500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ otg_unregister_notifier(di->otg, &di->nb);
+ otg_put_transceiver(di->otg);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->charger_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->usb_chg.psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_charger_probe(struct platform_device *pdev)
+{
+ int irq, i, charger_status, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab5500_charger *di =
+ kzalloc(sizeof(struct ab5500_charger), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ /* initialize lock */
+ spin_lock_init(&di->usb_state.usb_lock);
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->charger;
+ di->bat = plat_data->battery;
+
+ /* get charger specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ /* USB supply */
+ /* power_supply base class */
+ di->usb_chg.psy.name = "ab5500_usb";
+ di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+ di->usb_chg.psy.properties = ab5500_charger_usb_props;
+ di->usb_chg.psy.num_properties = ARRAY_SIZE(ab5500_charger_usb_props);
+ di->usb_chg.psy.get_property = ab5500_charger_usb_get_property;
+ di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->usb_chg.ops.enable = &ab5500_charger_usb_en;
+ di->usb_chg.ops.kick_wd = &ab5500_charger_watchdog_kick;
+ di->usb_chg.ops.update_curr = &ab5500_charger_update_charger_current;
+ di->usb_chg.max_out_volt = ab5500_charger_voltage_map[
+ ARRAY_SIZE(ab5500_charger_voltage_map) - 1];
+ di->usb_chg.max_out_curr = ab5500_charger_current_map[
+ ARRAY_SIZE(ab5500_charger_current_map) - 1];
+
+
+ /* Create a work queue for the charger */
+ di->charger_wq =
+ create_singlethread_workqueue("ab5500_charger_wq");
+	if (di->charger_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		ret = -ENOMEM;
+		goto free_device_info;
+	}
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ ab5500_charger_check_hw_failure_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ ab5500_charger_check_usbchargernotok_work);
+
+ /* Init work for charger detection */
+ INIT_WORK(&di->usb_link_status_work,
+ ab5500_charger_usb_link_status_work);
+ INIT_WORK(&di->detect_usb_type_work,
+ ab5500_charger_detect_usb_type_work);
+
+ INIT_WORK(&di->usb_state_changed_work,
+ ab5500_charger_usb_state_changed_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&di->check_usb_thermal_prot_work,
+ ab5500_charger_check_usb_thermal_prot_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_charger_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB5500 CID is: 0x%02x\n", di->chip_id);
+
+ /* Initialize OVV, and other registers */
+ ret = ab5500_charger_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize ABB registers\n");
+		goto free_charger_wq;
+ }
+
+ /* Register USB charger class */
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+		goto free_charger_wq;
+ }
+
+ di->otg = otg_get_transceiver();
+ if (!di->otg) {
+ dev_err(di->dev, "failed to get otg transceiver\n");
+		ret = -ENODEV;
+		goto free_usb;
+ }
+ di->nb.notifier_call = ab5500_charger_usb_notifier_call;
+ ret = otg_register_notifier(di->otg, &di->nb);
+ if (ret) {
+ dev_err(di->dev, "failed to register otg notifier\n");
+ goto put_otg_transceiver;
+ }
+
+ /* Identify the connected charger types during startup */
+ charger_status = ab5500_charger_detect_chargers(di);
+ if (charger_status & USB_PW_CONN) {
+ dev_dbg(di->dev, "VBUS Detect during startup\n");
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->usb_link_status_work);
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab5500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_charger_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_charger_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_irq:
+ otg_unregister_notifier(di->otg, &di->nb);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+put_otg_transceiver:
+ otg_put_transceiver(di->otg);
+free_usb:
+ power_supply_unregister(&di->usb_chg.psy);
+free_charger_wq:
+ destroy_workqueue(di->charger_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_charger_driver = {
+ .probe = ab5500_charger_probe,
+ .remove = __devexit_p(ab5500_charger_remove),
+ .suspend = ab5500_charger_suspend,
+ .resume = ab5500_charger_resume,
+ .driver = {
+ .name = "ab5500-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_charger_init(void)
+{
+ return platform_driver_register(&ab5500_charger_driver);
+}
+
+static void __exit ab5500_charger_exit(void)
+{
+ platform_driver_unregister(&ab5500_charger_driver);
+}
+
+subsys_initcall_sync(ab5500_charger_init);
+module_exit(ab5500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-charger");
+MODULE_DESCRIPTION("AB5500 charger management driver");
diff --git a/drivers/power/ab5500_fg.c b/drivers/power/ab5500_fg.c
new file mode 100644
index 00000000000..62710680d1e
--- /dev/null
+++ b/drivers/power/ab5500_fg.c
@@ -0,0 +1,1838 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required for a Li-Ion backup battery but
+ * not for a capacitive one. HREF boards use a capacitive backup battery, so
+ * backup battery management is not used there, although the supporting code
+ * is available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+static LIST_HEAD(ab5500_fg_list);
+
+/* U5500 Constants */
+#define FG_ON_MASK 0x04
+#define FG_ON 0x04
+#define FG_ACC_RESET_ON_READ_MASK 0x08
+#define FG_ACC_RESET_ON_READ 0x08
+#define EN_READOUT_MASK 0x01
+#define EN_READOUT 0x01
+#define RESET 0x00
+#define EOC_52_mA 0x04
+#define MILLI_TO_MICRO 1000
+#define FG_LSB_IN_MA 770
+#define QLSB_NANO_AMP_HOURS_X10 1129
+#define SEC_TO_SAMPLE(S) (S * 4)
+#define NBR_AVG_SAMPLES 20
+#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+
+#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
+
+#define interpolate(x, x1, y1, x2, y2) \
+	((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
+
+#define to_ab5500_fg_device_info(x) container_of((x), \
+	struct ab5500_fg, fg_psy)
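+
+/*
+ * Worked examples (illustrative, not from the original source):
+ * SEC_TO_SAMPLE(30) = 120 samples, since the coulomb counter delivers one
+ * sample every 250 ms (4 Hz).
+ * interpolate(3700, 3600, 500, 3800, 700) = 500 + (200 * 100) / 200 = 600,
+ * i.e. 3700 mV maps to 600 per mille on a table segment running from
+ * 3600 mV / 50% to 3800 mV / 70%.
+ */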
+
+/**
+ * struct ab5500_fg_interrupts - ab5500 fg interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_fg_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab5500_fg_discharge_state {
+ AB5500_FG_DISCHARGE_INIT,
+ AB5500_FG_DISCHARGE_INITMEASURING,
+ AB5500_FG_DISCHARGE_INIT_RECOVERY,
+ AB5500_FG_DISCHARGE_RECOVERY,
+ AB5500_FG_DISCHARGE_READOUT,
+ AB5500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+ "DISCHARGE_INIT",
+ "DISCHARGE_INITMEASURING",
+ "DISCHARGE_INIT_RECOVERY",
+ "DISCHARGE_RECOVERY",
+ "DISCHARGE_READOUT",
+ "DISCHARGE_WAKEUP",
+};
+
+enum ab5500_fg_charge_state {
+ AB5500_FG_CHARGE_INIT,
+ AB5500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+ "CHARGE_INIT",
+ "CHARGE_READOUT",
+};
+
+enum ab5500_fg_calibration_state {
+ AB5500_FG_CALIB_INIT,
+ AB5500_FG_CALIB_WAIT,
+ AB5500_FG_CALIB_END,
+};
+
+struct ab5500_fg_avg_cap {
+ int avg;
+ int samples[NBR_AVG_SAMPLES];
+ __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ int pos;
+ int nbr_samples;
+ int sum;
+};
+
+struct ab5500_fg_battery_capacity {
+ int max_mah_design;
+ int max_mah;
+ int mah;
+ int permille;
+ int level;
+ int prev_mah;
+ int prev_percent;
+ int prev_level;
+};
+
+struct ab5500_fg_flags {
+ bool fg_enabled;
+ bool conv_done;
+ bool charging;
+ bool fully_charged;
+ bool low_bat_delay;
+ bool low_bat;
+ bool bat_ovv;
+ bool batt_unknown;
+ bool calibrate;
+};
+
+/**
+ * struct ab5500_fg - ab5500 FG device information
+ * @dev: Pointer to the structure device
+ * @vbat: Battery voltage in mV
+ * @vbat_nom: Nominal battery voltage in mV
+ * @inst_curr: Instantaneous battery current in mA
+ * @avg_curr: Average battery current in mA
+ * @fg_samples: Number of samples used in the FG accumulation
+ * @accu_charge: Accumulated charge from the last conversion
+ * @recovery_cnt: Counter for recovery mode
+ * @high_curr_cnt: Counter for high current mode
+ * @init_cnt: Counter for init mode
+ * @v_to_cap: capacity based on battery voltage
+ * @recovery_needed: Indicate if recovery is needed
+ * @high_curr_mode: Indicate if we're in high current mode
+ * @init_capacity: Indicate if initial capacity measuring should be done
+ * @calib_state: State during offset calibration
+ * @discharge_state: Current discharge state
+ * @charge_state: Current charge state
+ * @flags: Structure for information about events triggered
+ * @bat_cap: Structure for battery capacity specific parameters
+ * @avg_cap: Average capacity filter
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @gpadc_auto: Pointer to the struct adc_auto_input
+ * @pdata: Pointer to the ab5500_fg platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @fg_psy: Structure that holds the FG specific battery properties
+ * @fg_wq: Work queue for running the FG algorithm
+ * @fg_periodic_work: Work to run the FG algorithm periodically
+ * @fg_low_bat_work: Work to check low bat condition
+ * @fg_work: Work to run the FG algorithm instantly
+ * @fg_acc_cur_work: Work to read the FG accumulator
+ * @cc_lock: Mutex for locking the CC
+ * @node: struct of type list_head
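+ * @avg_current_timer: Timer that triggers the FG accumulator readout work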
+ */
+struct ab5500_fg {
+ struct device *dev;
+ int vbat;
+ int vbat_nom;
+ int inst_curr;
+ int avg_curr;
+ int fg_samples;
+ int accu_charge;
+ int recovery_cnt;
+ int high_curr_cnt;
+ int init_cnt;
+ int v_to_cap;
+ bool recovery_needed;
+ bool high_curr_mode;
+ bool init_capacity;
+ enum ab5500_fg_calibration_state calib_state;
+ enum ab5500_fg_discharge_state discharge_state;
+ enum ab5500_fg_charge_state charge_state;
+ struct ab5500_fg_flags flags;
+ struct ab5500_fg_battery_capacity bat_cap;
+ struct ab5500_fg_avg_cap avg_cap;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_fg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply fg_psy;
+ struct workqueue_struct *fg_wq;
+ struct delayed_work fg_periodic_work;
+ struct delayed_work fg_low_bat_work;
+ struct work_struct fg_work;
+ struct delayed_work fg_acc_cur_work;
+ struct mutex cc_lock;
+ struct list_head node;
+ struct timer_list avg_current_timer;
+};
+
+/* Main battery properties */
+static enum power_supply_property ab5500_fg_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+struct ab5500_fg *ab5500_fg_get(void)
+{
+ struct ab5500_fg *di;
+ di = list_first_entry(&ab5500_fg_list, struct ab5500_fg, node);
+
+ return di;
+}
+
+/**
+ * ab5500_fg_is_low_curr() - Low or high current mode
+ * @di: pointer to the ab5500_fg structure
+ * @curr: the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab5500_fg_is_low_curr(struct ab5500_fg *di, int curr)
+{
+ /*
+ * We want to know if we're in low current mode
+ */
+ if (curr > -di->bat->fg_params->high_curr_threshold)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ab5500_fg_add_cap_sample() - Add capacity to average filter
+ * @di: pointer to the ab5500_fg structure
+ * @sample: the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab5500_fg_add_cap_sample(struct ab5500_fg *di, int sample)
+{
+ struct timespec ts;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ do {
+ avg->sum += sample - avg->samples[avg->pos];
+ avg->samples[avg->pos] = sample;
+ avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->pos++;
+
+ if (avg->pos == NBR_AVG_SAMPLES)
+ avg->pos = 0;
+
+ if (avg->nbr_samples < NBR_AVG_SAMPLES)
+ avg->nbr_samples++;
+
+ /*
+ * Check the time stamp for each sample. If too old,
+ * replace with latest sample
+ */
+ } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+ avg->avg = avg->sum / avg->nbr_samples;
+
+ return avg->avg;
+}
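+
+/*
+ * Illustrative example (numbers are not from the original source): with the
+ * 20-sample filter full of 900 mAh entries (sum = 18000), adding an 880 mAh
+ * sample replaces one 900 mAh entry, so the sum becomes 17980 and the
+ * returned average is 17980 / 20 = 899 mAh.
+ */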
+
+/**
+ * ab5500_fg_fill_cap_sample() - Fill average filter
+ * @di: pointer to the ab5500_fg structure
+ * @sample: the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab5500_fg_fill_cap_sample(struct ab5500_fg *di, int sample)
+{
+ int i;
+ struct timespec ts;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = sample;
+ avg->time_stamps[i] = ts.tv_sec;
+ }
+
+ avg->pos = 0;
+ avg->nbr_samples = NBR_AVG_SAMPLES;
+ avg->sum = sample * NBR_AVG_SAMPLES;
+ avg->avg = sample;
+}
+
+/**
+ * ab5500_fg_coulomb_counter() - enable coulomb counter
+ * @di: pointer to the ab5500_fg structure
+ * @enable: enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab5500_fg_coulomb_counter(struct ab5500_fg *di, bool enable)
+{
+ int ret = 0;
+ mutex_lock(&di->cc_lock);
+ if (enable) {
+ /* Power-up the CC */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ (FG_ON | FG_ACC_RESET_ON_READ));
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = true;
+ } else {
+ /* Stop the CC */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ FG_ON_MASK, RESET);
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = false;
+
+ }
+ dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+ enable, di->fg_samples);
+
+ mutex_unlock(&di->cc_lock);
+
+ return ret;
+cc_err:
+ dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab5500_fg_inst_curr() - battery instantaneous current
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns the battery instantaneous current (on success) else error code
+ */
+static int ab5500_fg_inst_curr(struct ab5500_fg *di)
+{
+ u8 low, high;
+ static int val;
+ int ret = 0;
+ bool fg_off = false;
+
+ if (!di->flags.fg_enabled) {
+ fg_off = true;
+ /* Power-up the CC */
+ ab5500_fg_coulomb_counter(di, true);
+ msleep(250);
+ }
+
+ mutex_lock(&di->cc_lock);
+ /*
+	 * Since there is no interrupt for this, just wait for 250 ms.
+	 * 250 ms is one sample conversion time with the 32.768 kHz RTC clock
+ */
+ msleep(250);
+
+ /* Enable read request */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_B,
+ EN_READOUT_MASK, EN_READOUT);
+ if (ret)
+ goto inst_curr_err;
+
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FGDIR_READ0, &low);
+ if (ret < 0)
+ goto inst_curr_err;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FGDIR_READ1, &high);
+ if (ret < 0)
+ goto inst_curr_err;
+
+ /*
+	 * A negative value means discharging.
+	 * Sign-extend the 13-bit two's complement sample to a signed value
+ */
+ if (high & 0x10)
+ val = (low | (high << 8) | 0xFFFFE000);
+ else
+ val = (low | (high << 8));
+
+ /*
+ * Convert to unit value in mA
+ * R(FGSENSE) = 20 mOhm
+	 * Scaling of LSB: with R(FGSENSE) this corresponds to a current of
+	 * I = Q/t = 192.7 uC * 4 Hz = 0.77 mA
+ */
+ val = (val * 770) / 1000;
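+	/*
+	 * Illustrative example (raw value not from the original source):
+	 * low = 0x3B, high = 0x1F sign-extends to -197, and
+	 * -197 * 770 / 1000 = -151, i.e. roughly 151 mA discharge current.
+	 */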
+
+ mutex_unlock(&di->cc_lock);
+
+ if (fg_off) {
+ dev_dbg(di->dev, "%s Disable FG\n", __func__);
+ /* Power-off the CC */
+ ab5500_fg_coulomb_counter(di, false);
+ }
+
+ return val;
+
+inst_curr_err:
+	dev_err(di->dev, "%s Get instant current failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+static void ab5500_fg_acc_cur_timer_expired(unsigned long data)
+{
+ struct ab5500_fg *di = (struct ab5500_fg *) data;
+ dev_dbg(di->dev, "Avg current timer expired\n");
+
+ /* Trigger execution of the algorithm instantly */
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, 0);
+}
+
+/**
+ * ab5500_fg_acc_cur_work() - average battery current
+ * @work: pointer to the work_struct structure
+ *
+ * Update the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab5500_fg_acc_cur_work(struct work_struct *work)
+{
+ int val;
+ int ret;
+ u8 low, med, high, cnt_low, cnt_high;
+
+ struct ab5500_fg *di = container_of(work,
+ struct ab5500_fg, fg_acc_cur_work.work);
+
+ if (!di->flags.fg_enabled) {
+ /* Power-up the CC */
+ ab5500_fg_coulomb_counter(di, true);
+ msleep(250);
+ }
+ mutex_lock(&di->cc_lock);
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_C,
+ EN_READOUT_MASK, EN_READOUT);
+ if (ret < 0)
+ goto exit;
+ /* If charging read charging registers for accumulated values */
+ if (di->flags.charging) {
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH0, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH1, &med);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH2, &high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT0, &cnt_low);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT1, &cnt_high);
+ if (ret < 0)
+ goto exit;
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ di->bat->interval_charging * HZ);
+ } else { /* discharging */
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH0, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH1, &med);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH2, &high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT0, &cnt_low);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT1, &cnt_high);
+ if (ret < 0)
+ goto exit;
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ di->bat->interval_not_charging * HZ);
+ }
+ di->fg_samples = (cnt_low | (cnt_high << 8));
+ val = (low | (med << 8) | (high << 16));
+
+ if (di->fg_samples) {
+ di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10)/10000;
+ di->avg_curr = (val * FG_LSB_IN_MA) / (di->fg_samples * 1000);
+ } else
+ dev_err(di->dev,
+			"samples is zero, using previously calculated average current\n");
+ di->flags.conv_done = true;
+
+ mutex_unlock(&di->cc_lock);
+
+ queue_work(di->fg_wq, &di->fg_work);
+
+ return;
+exit:
+ dev_err(di->dev,
+ "Failed to read or write gas gauge registers\n");
+ mutex_unlock(&di->cc_lock);
+ queue_work(di->fg_wq, &di->fg_work);
+}
+
+/**
+ * ab5500_fg_bat_voltage() - get battery voltage
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery voltage(on success) else error code
+ */
+static int ab5500_fg_bat_voltage(struct ab5500_fg *di)
+{
+ int vbat;
+ static int prev;
+
+ vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+ if (vbat < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+
+ prev = vbat;
+ return vbat;
+}
+
+/**
+ * ab5500_fg_volt_to_capacity() - Voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ * @voltage: The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab5500_fg_volt_to_capacity(struct ab5500_fg *di, int voltage)
+{
+ int i, tbl_size;
+ struct abx500_v_to_cap *tbl;
+ int cap = 0;
+
+	tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+	for (i = 0; i < tbl_size - 1; ++i) {
+ if (di->vbat < tbl[i].voltage && di->vbat > tbl[i+1].voltage)
+ di->v_to_cap = tbl[i].capacity;
+ }
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (voltage > tbl[i].voltage)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ cap = interpolate(voltage,
+ tbl[i].voltage,
+ tbl[i].capacity * 10,
+ tbl[i-1].voltage,
+ tbl[i-1].capacity * 10);
+ } else if (i == 0) {
+ cap = 1000;
+ } else {
+ cap = 0;
+ }
+
+ dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille",
+ __func__, voltage, cap);
+
+ return cap;
+}
+
+/**
+ * ab5500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab5500_fg_uncomp_volt_to_capacity(struct ab5500_fg *di)
+{
+ di->vbat = ab5500_fg_bat_voltage(di);
+ return ab5500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab5500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab5500_fg_load_comp_volt_to_capacity(struct ab5500_fg *di)
+{
+ int vbat_comp;
+
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ di->vbat = ab5500_fg_bat_voltage(di);
+
+ /* Use Ohms law to get the load compensated voltage */
+ vbat_comp = di->vbat - (di->inst_curr *
+ di->bat->bat_type[di->bat->batt_id].battery_resistance) / 1000;
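+	/*
+	 * Illustrative example (values not from the original source): with
+	 * vbat = 3800 mV, inst_curr = -300 mA (discharging) and a battery
+	 * resistance of 150 mOhm, vbat_comp = 3800 - (-300 * 150) / 1000 =
+	 * 3845 mV.
+	 */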
+
+ dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
+ "R: %dmOhm, Current: %dmA\n",
+ __func__,
+ di->vbat,
+ vbat_comp,
+ di->bat->bat_type[di->bat->batt_id].battery_resistance,
+ di->inst_curr);
+
+ return ab5500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab5500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di: pointer to the ab5500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab5500_fg_convert_mah_to_permille(struct ab5500_fg *di, int cap_mah)
+{
+ return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
+
+/**
+ * ab5500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di: pointer to the ab5500_fg structure
+ * @cap_pm: capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab5500_fg_convert_permille_to_mah(struct ab5500_fg *di, int cap_pm)
+{
+ return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
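+
+/*
+ * Illustrative example (values not from the original source): with
+ * max_mah_design = 1500 mAh, 750 mAh converts to 500 per mille and
+ * 500 per mille converts back to 750 mAh.
+ */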
+
+/**
+ * ab5500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di: pointer to the ab5500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab5500_fg_convert_mah_to_uwh(struct ab5500_fg *di, int cap_mah)
+{
+ u64 div_res;
+ u32 div_rem;
+
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+ div_rem = do_div(div_res, 1000);
+
+ /* Make sure to round upwards if necessary */
+ if (div_rem >= 1000 / 2)
+ div_res++;
+
+ return (int) div_res;
+}
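+
+/*
+ * Illustrative trace (values not from the original source): with
+ * cap_mah = 1500 and vbat_nom = 3700, div_res = 5550000 and do_div() leaves
+ * a quotient of 5550 with remainder 0, so 5550 is returned (units as per
+ * the kernel-doc above).
+ */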
+
+/**
+ * ab5500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Return the capacity in mAh based on previously calculated capacity and the FG
+ * accumulator register value. The filter is filled with this capacity
+ */
+static int ab5500_fg_calc_cap_charging(struct ab5500_fg *di)
+{
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ /*
+ * We force capacity to 100% as long as the algorithm
+ * reports that it's full.
+ */
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+ di->flags.fully_charged)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ /* We need to update battery voltage and inst current when charging */
+ di->vbat = ab5500_fg_bat_voltage(di);
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di: pointer to the ab5500_fg structure
+ * @comp: if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab5500_fg_calc_cap_discharge_voltage(struct ab5500_fg *di, bool comp)
+{
+ int permille, mah;
+
+ if (comp)
+ permille = ab5500_fg_load_comp_volt_to_capacity(di);
+ else
+ permille = ab5500_fg_uncomp_volt_to_capacity(di);
+
+ mah = ab5500_fg_convert_permille_to_mah(di, permille);
+
+ di->bat_cap.mah = ab5500_fg_add_cap_sample(di, mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Return the capacity in mAh based on previously calculated capacity and the FG
+ * accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab5500_fg_calc_cap_discharge_fg(struct ab5500_fg *di)
+{
+ int permille_volt, permille;
+
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ /*
+ * Check against voltage based capacity. It can not be lower
+ * than what the uncompensated voltage says
+ */
+ permille = ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ permille_volt = ab5500_fg_uncomp_volt_to_capacity(di);
+
+ if (permille < permille_volt) {
+ di->bat_cap.permille = permille_volt;
+ di->bat_cap.mah = ab5500_fg_convert_permille_to_mah(di,
+ di->bat_cap.permille);
+
+ dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+ __func__,
+ permille,
+ permille_volt);
+
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ } else {
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ }
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_capacity_level() - Get the battery capacity level
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab5500_fg_capacity_level(struct ab5500_fg *di)
+{
+ int ret, percent;
+
+ percent = di->bat_cap.permille / 10;
+
+ if (percent <= di->bat->cap_levels->critical ||
+ di->flags.low_bat)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (percent <= di->bat->cap_levels->low)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (percent <= di->bat->cap_levels->normal)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (percent <= di->bat->cap_levels->high)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ return ret;
+}
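+
+/*
+ * Illustrative example (threshold values come from platform data and are
+ * hypothetical here): with cap_levels = {critical: 2, low: 10, normal: 70,
+ * high: 95}, a permille value of 84 gives 8 percent, which falls in
+ * POWER_SUPPLY_CAPACITY_LEVEL_LOW.
+ */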
+
+/**
+ * ab5500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di: pointer to the ab5500_fg structure
+ * @init: capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab5500_fg_check_capacity_limits(struct ab5500_fg *di, bool init)
+{
+ bool changed = false;
+
+ di->bat_cap.level = ab5500_fg_capacity_level(di);
+
+ if (di->bat_cap.level != di->bat_cap.prev_level) {
+ /*
+ * We do not allow reported capacity level to go up
+ * unless we're charging or if we're in init
+ */
+ if (!(!di->flags.charging && di->bat_cap.level >
+ di->bat_cap.prev_level) || init) {
+ dev_dbg(di->dev, "level changed from %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ di->bat_cap.prev_level = di->bat_cap.level;
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "level not allowed to go up "
+ "since no charger is connected: %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ }
+ }
+
+ /*
+ * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+ * shutdown
+ */
+ if (di->flags.low_bat) {
+ dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+ di->bat_cap.prev_percent = 0;
+ di->bat_cap.permille = 0;
+ di->bat_cap.prev_mah = 0;
+ di->bat_cap.mah = 0;
+ changed = true;
+ } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+ if (di->bat_cap.permille / 10 == 0) {
+ /*
+ * We will not report 0% unless we've got
+ * the LOW_BAT IRQ, no matter what the FG
+ * algorithm says.
+ */
+ di->bat_cap.prev_percent = 1;
+ di->bat_cap.permille = 1;
+ di->bat_cap.prev_mah = 1;
+ di->bat_cap.mah = 1;
+
+ changed = true;
+ } else if (!(!di->flags.charging &&
+ (di->bat_cap.permille / 10) >
+ di->bat_cap.prev_percent) || init) {
+ /*
+ * We do not allow reported capacity to go up
+ * unless we're charging or if we're in init
+ */
+ dev_dbg(di->dev,
+ "capacity changed from %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "capacity not allowed to go up since "
+ "no charger is connected: %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ }
+ }
+
+ if (changed)
+ power_supply_changed(&di->fg_psy);
+
+}
+
+static void ab5500_fg_charge_state_to(struct ab5500_fg *di,
+ enum ab5500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+ di->charge_state,
+ charge_state[di->charge_state],
+ new_state,
+ charge_state[new_state]);
+
+ di->charge_state = new_state;
+}
+
+static void ab5500_fg_discharge_state_to(struct ab5500_fg *di,
+	enum ab5500_fg_discharge_state new_state)
+{
+	dev_dbg(di->dev, "Discharge state from %d [%s] to %d [%s]\n",
+ di->discharge_state,
+ discharge_state[di->discharge_state],
+ new_state,
+ discharge_state[new_state]);
+
+ di->discharge_state = new_state;
+}
+
+/**
+ * ab5500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab5500_fg_algorithm_charging(struct ab5500_fg *di)
+{
+ /*
+ * If we change to discharge mode
+ * we should start with recovery
+ */
+ if (di->discharge_state != AB5500_FG_DISCHARGE_INIT_RECOVERY)
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_INIT_RECOVERY);
+
+ switch (di->charge_state) {
+ case AB5500_FG_CHARGE_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_charging);
+
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_READOUT);
+
+ break;
+
+ case AB5500_FG_CHARGE_READOUT:
+ /*
+ * Read the FG and calculate the new capacity
+ */
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ ab5500_fg_calc_cap_charging(di);
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Check capacity limits */
+ ab5500_fg_check_capacity_limits(di, false);
+}
+
+/**
+ * ab5500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab5500_fg_algorithm_discharging(struct ab5500_fg *di)
+{
+ int sleep_time;
+
+ /* If we change to charge mode we should start with init */
+ if (di->charge_state != AB5500_FG_CHARGE_INIT)
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+
+ switch (di->discharge_state) {
+ case AB5500_FG_DISCHARGE_INIT:
+ /* We use the FG IRQ to work on */
+ di->init_cnt = 0;
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_INITMEASURING);
+
+ /* Intentional fallthrough */
+ case AB5500_FG_DISCHARGE_INITMEASURING:
+ /*
+ * Discard a number of samples during startup.
+ * After that, use compensated voltage for a few
+ * samples to get an initial capacity.
+ * Then go to READOUT
+ */
+ sleep_time = di->bat->fg_params->init_timer;
+
+ /* Discard the first [x] seconds */
+ if (di->init_cnt >
+ di->bat->fg_params->init_discard_time) {
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+
+ ab5500_fg_check_capacity_limits(di, true);
+ }
+
+ di->init_cnt += sleep_time;
+ if (di->init_cnt >
+ di->bat->fg_params->init_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB5500_FG_DISCHARGE_INIT_RECOVERY:
+ di->recovery_cnt = 0;
+ di->recovery_needed = true;
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_RECOVERY);
+
+ /* Intentional fallthrough */
+
+ case AB5500_FG_DISCHARGE_RECOVERY:
+ sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+ /*
+ * We should check the power consumption
+ * If low, go to READOUT (after x min) or
+ * RECOVERY_SLEEP if time left.
+ * If high, go to READOUT
+ */
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ if (ab5500_fg_is_low_curr(di, di->inst_curr)) {
+ if (di->recovery_cnt >
+ di->bat->fg_params->recovery_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ di->recovery_needed = false;
+ } else {
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ sleep_time * HZ);
+ }
+ di->recovery_cnt += sleep_time;
+ } else {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB5500_FG_DISCHARGE_READOUT:
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ if (ab5500_fg_is_low_curr(di, di->inst_curr)) {
+ /* Detect mode change */
+ if (di->high_curr_mode) {
+ di->high_curr_mode = false;
+ di->high_curr_cnt = 0;
+ }
+
+ if (di->recovery_needed) {
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_RECOVERY);
+
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ 0);
+
+ break;
+ }
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ } else {
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ /* Detect mode change */
+ if (!di->high_curr_mode) {
+ di->high_curr_mode = true;
+ di->high_curr_cnt = 0;
+ }
+
+ di->high_curr_cnt +=
+ di->bat->fg_params->accu_high_curr;
+ if (di->high_curr_cnt >
+ di->bat->fg_params->high_curr_time)
+ di->recovery_needed = true;
+
+ ab5500_fg_calc_cap_discharge_fg(di);
+ }
+
+ ab5500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ case AB5500_FG_DISCHARGE_WAKEUP:
+ ab5500_fg_coulomb_counter(di, true);
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ /* Re-program number of samples set above */
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_READOUT);
+
+ ab5500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab5500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di: pointer to the ab5500_fg structure
+ *
+ */
+static void ab5500_fg_algorithm_calibrate(struct ab5500_fg *di)
+{
+ int ret;
+
+ switch (di->calib_state) {
+ case AB5500_FG_CALIB_INIT:
+ dev_dbg(di->dev, "Calibration ongoing...\n");
+ /* TODO: For Cut 1.1 no calibration */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ FG_ACC_RESET_ON_READ_MASK, FG_ACC_RESET_ON_READ);
+ if (ret)
+ goto err;
+ di->calib_state = AB5500_FG_CALIB_WAIT;
+ break;
+ case AB5500_FG_CALIB_END:
+ di->flags.calibrate = false;
+ dev_dbg(di->dev, "Calibration done...\n");
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ break;
+ case AB5500_FG_CALIB_WAIT:
+ dev_dbg(di->dev, "Calibration WFI\n");
+ default:
+ break;
+ }
+ return;
+err:
+ /* Something went wrong, don't calibrate then */
+ dev_err(di->dev, "failed to calibrate the CC\n");
+ di->flags.calibrate = false;
+ di->calib_state = AB5500_FG_CALIB_INIT;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab5500_fg_algorithm() - Entry point for the FG algorithm
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab5500_fg_algorithm(struct ab5500_fg *di)
+{
+ if (di->flags.calibrate)
+ ab5500_fg_algorithm_calibrate(di);
+ else {
+ if (di->flags.charging)
+ ab5500_fg_algorithm_charging(di);
+ else
+ ab5500_fg_algorithm_discharging(di);
+ }
+
+ dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+ "%d %d %d %d %d %d %d\n",
+ di->bat_cap.max_mah_design,
+ di->bat_cap.mah,
+ di->bat_cap.permille,
+ di->bat_cap.level,
+ di->bat_cap.prev_mah,
+ di->bat_cap.prev_percent,
+ di->bat_cap.prev_level,
+ di->vbat,
+ di->inst_curr,
+ di->avg_curr,
+ di->accu_charge,
+ di->flags.charging,
+ di->charge_state,
+ di->discharge_state,
+ di->high_curr_mode,
+ di->recovery_needed);
+}
+
+/**
+ * ab5500_fg_periodic_work() - Run the FG state machine periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab5500_fg_periodic_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_periodic_work.work);
+
+ if (di->init_capacity) {
+ /* A dummy read that will return 0 */
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ /* Get an initial capacity calculation */
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ ab5500_fg_check_capacity_limits(di, true);
+ di->init_capacity = false;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else
+ ab5500_fg_algorithm(di);
+}
+
+/**
+ * ab5500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab5500_fg_low_bat_work(struct work_struct *work)
+{
+ int vbat;
+
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_low_bat_work.work);
+
+ vbat = ab5500_fg_bat_voltage(di);
+
+ /* Check if LOW_BAT still fulfilled */
+ if (vbat < di->bat->fg_params->lowbat_threshold) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+
+ /*
+ * We need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ } else {
+ di->flags.low_bat = false;
+ dev_warn(di->dev, "Battery voltage OK again\n");
+ }
+
+ /* This is needed to dispatch LOW_BAT */
+ ab5500_fg_check_capacity_limits(di, false);
+
+ /* Set this flag to check if LOW_BAT IRQ still occurs */
+ di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab5500_fg_instant_work() - Run the FG state machine instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab5500_fg_instant_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg, fg_work);
+
+ ab5500_fg_algorithm(di);
+}
+
+/**
+ * ab5500_fg_get_property() - get the fg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now: battery voltage
+ * current_now: battery instant current
+ * current_avg: battery average current
+ * charge_full_design: capacity where battery is considered full
+ * charge_now: battery capacity in nAh
+ * capacity: capacity in percent
+ * capacity_level: capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_fg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_fg *di;
+ int i, tbl_size;
+ struct abx500_v_to_cap *tbl;
+
+ di = to_ab5500_fg_device_info(psy);
+
+ /*
+ * If battery is identified as unknown and charging of unknown
+ * batteries is disabled, we always report 100% capacity and
+ * capacity level UNKNOWN, since we can't calculate
+ * remaining capacity
+ */
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (di->flags.bat_ovv)
+			val->intval = 4750000;
+ else {
+ di->vbat = ab5500_gpadc_convert
+ (di->gpadc, MAIN_BAT_V);
+ val->intval = di->vbat * 1000;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ val->intval = di->inst_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = di->avg_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah_design);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ else
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.prev_mah);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = di->bat_cap.max_mah_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = di->bat_cap.max_mah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = di->bat_cap.max_mah;
+ else
+ val->intval = di->bat_cap.prev_mah;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = 100;
+ else if (di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl) {
+			tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[
+ di->bat->batt_id].n_v_cap_tbl_elements;
+
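+			/* The table is assumed to be sorted by falling voltage */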
+ for (i = 0; i < tbl_size; ++i) {
+ if (di->vbat < tbl[i].voltage &&
+ di->vbat > tbl[i+1].voltage) {
+ di->v_to_cap = tbl[i].capacity;
+ break;
+ }
+ }
+ val->intval = di->v_to_cap;
+ } else
+ val->intval = di->bat_cap.prev_percent;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else
+ val->intval = di->bat_cap.prev_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab5500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab5500_fg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab5500_fg_device_info(psy);
+
+ /*
+ * For all psy where the name of your driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ switch (ret.intval) {
+ case POWER_SUPPLY_STATUS_UNKNOWN:
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (!di->flags.charging)
+ break;
+ di->flags.charging = false;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (di->flags.fully_charged)
+ break;
+ di->flags.fully_charged = true;
+ /* Save current capacity as maximum */
+ di->bat_cap.max_mah = di->bat_cap.mah;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ if (di->flags.charging)
+ break;
+ di->flags.charging = true;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+				}
+				break;
+			default:
+				break;
+			}
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->flags.batt_unknown = false;
+ else
+ di->flags.batt_unknown = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int ab5500_fg_bat_v_trig(int mux)
+{
+ struct ab5500_fg *di = ab5500_fg_get();
+
+ /* check if the battery voltage is below low threshold */
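+	/*
+	 * The 2700/4200 mV limits used here match the auto-trigger window
+	 * programmed in ab5500_fg_init_hw_registers()
+	 */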
+ if (di->vbat < 2700) {
+ dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+ di->flags.low_bat_delay = true;
+ /*
+ * Start a timer to check LOW_BAT again after some time
+ * This is done to avoid shutdown on single voltage dips
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ }
+	/* check if battery voltage is above OVV */
+ else if (di->vbat > 4200) {
+ dev_dbg(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
+
+ power_supply_changed(&di->fg_psy);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ab5500_fg_init_hw_registers() - Set up FG related registers
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab5500_fg_init_hw_registers(struct ab5500_fg *di)
+{
+ int ret;
+ struct adc_auto_input *auto_ip;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(di->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
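+	/*
+	 * Set up the GPADC auto-trigger: ab5500_fg_bat_v_trig() is called
+	 * when the battery voltage crosses the 2700/4200 mV limits below
+	 */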
+ auto_ip->mux = MAIN_BAT_V;
+ auto_ip->freq = MS500;
+ auto_ip->min = 2700;
+ auto_ip->max = 4200;
+ auto_ip->auto_adc_callback = ab5500_fg_bat_v_trig;
+ di->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto);
+ if (ret)
+ dev_err(di->dev,
+			"failed to set auto trigger for battery voltage\n");
+	/* Set the End Of Charge current level (EOC_52_mA) */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_EOC, EOC_52_mA);
+ return ret;
+}
+
+/**
+ * ab5500_fg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function implements the external_power_changed callback of the
+ * power_supply structure.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab5500_fg_external_power_changed(struct power_supply *psy)
+{
+ struct ab5500_fg *di = to_ab5500_fg_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->fg_psy, ab5500_fg_get_ext_psy_data);
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_fg_resume(struct platform_device *pdev)
+{
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ /*
+ * Change state if we're not charging. If we're charging we will wake
+ * up on the FG IRQ
+ */
+ if (!di->flags.charging) {
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_WAKEUP);
+ queue_work(di->fg_wq, &di->fg_work);
+ }
+
+ return 0;
+}
+
+static int ab5500_fg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ flush_delayed_work(&di->fg_periodic_work);
+
+ /*
+ * If the FG is enabled we will disable it before going to suspend
+ * only if we're not charging
+ */
+ if (di->flags.fg_enabled && !di->flags.charging)
+ ab5500_fg_coulomb_counter(di, false);
+
+ return 0;
+}
+#else
+#define ab5500_fg_suspend NULL
+#define ab5500_fg_resume NULL
+#endif
+
+static int __devexit ab5500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ /* Disable coulomb counter */
+ ret = ab5500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(di->dev, "failed to disable coulomb counter\n");
+
+ destroy_workqueue(di->fg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->fg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di->gpadc_auto);
+ kfree(di);
+ return ret;
+}
+
+static int __devinit ab5500_fg_probe(struct platform_device *pdev)
+{
+ struct abx500_bm_plat_data *plat_data;
+ int ret = 0;
+
+ struct ab5500_fg *di =
+ kzalloc(sizeof(struct ab5500_fg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->fg;
+ di->bat = plat_data->battery;
+
+ /* get fg specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no fg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ di->fg_psy.name = "ab5500_fg";
+ di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->fg_psy.properties = ab5500_fg_props;
+ di->fg_psy.num_properties = ARRAY_SIZE(ab5500_fg_props);
+ di->fg_psy.get_property = ab5500_fg_get_property;
+ di->fg_psy.supplied_to = di->pdata->supplied_to;
+ di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.external_power_changed = ab5500_fg_external_power_changed;
+
+ di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+ di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+ di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+ di->init_capacity = true;
+
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = create_singlethread_workqueue("ab5500_fg_wq");
+	if (di->fg_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		ret = -ENOMEM;
+		goto free_device_info;
+	}
+
+ /* Init work for running the fg algorithm instantly */
+ INIT_WORK(&di->fg_work, ab5500_fg_instant_work);
+
+ /* Init work for getting the battery accumulated current */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_acc_cur_work,
+ ab5500_fg_acc_cur_work);
+
+ /* Work delayed Queue to run the state machine */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ ab5500_fg_periodic_work);
+
+ /* Work to check low battery condition */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ ab5500_fg_low_bat_work);
+
+ list_add_tail(&di->node, &ab5500_fg_list);
+
+ /* Consider battery unknown until we're informed otherwise */
+ di->flags.batt_unknown = true;
+
+ /* Register FG power supply class */
+ ret = power_supply_register(di->dev, &di->fg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register FG psy\n");
+ goto free_fg_wq;
+ }
+
+ /* Initialize OVV, and other registers */
+ ret = ab5500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize registers\n");
+ goto pow_unreg;
+ }
+
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab5500_fg_coulomb_counter(di, true);
+
+	/* Initialize avg current timer */
+ init_timer(&di->avg_current_timer);
+ di->avg_current_timer.function = ab5500_fg_acc_cur_timer_expired;
+ di->avg_current_timer.data = (unsigned long) di;
+	di->avg_current_timer.expires = jiffies + 60 * HZ;
+	if (!timer_pending(&di->avg_current_timer))
+		add_timer(&di->avg_current_timer);
+	else
+		mod_timer(&di->avg_current_timer, jiffies + 60 * HZ);
+
+ platform_set_drvdata(pdev, di);
+
+ /* Calibrate the fg first time */
+ di->flags.calibrate = true;
+ di->calib_state = AB5500_FG_CALIB_INIT;
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, 0);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+pow_unreg:
+ power_supply_unregister(&di->fg_psy);
+free_fg_wq:
+ destroy_workqueue(di->fg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_fg_driver = {
+ .probe = ab5500_fg_probe,
+ .remove = __devexit_p(ab5500_fg_remove),
+ .suspend = ab5500_fg_suspend,
+ .resume = ab5500_fg_resume,
+ .driver = {
+ .name = "ab5500-fg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_fg_init(void)
+{
+ return platform_driver_register(&ab5500_fg_driver);
+}
+
+static void __exit ab5500_fg_exit(void)
+{
+ platform_driver_unregister(&ab5500_fg_driver);
+}
+
+subsys_initcall_sync(ab5500_fg_init);
+module_exit(ab5500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-fg");
+MODULE_DESCRIPTION("AB5500 Fuel Gauge driver");
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
new file mode 100644
index 00000000000..70e3a4688c9
--- /dev/null
+++ b/drivers/power/ab8500_btemp.c
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Battery temperature driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/mfd/ab8500/gpadc.h>
+
+#define VTVOUT_V 1800
+
+#define BTEMP_THERMAL_LOW_LIMIT -10
+#define BTEMP_THERMAL_MED_LIMIT 0
+#define BTEMP_THERMAL_HIGH_LIMIT_52 52
+#define BTEMP_THERMAL_HIGH_LIMIT_57 57
+#define BTEMP_THERMAL_HIGH_LIMIT_62 62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA 7
+#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+
+#define to_ab8500_btemp_device_info(x) container_of((x), \
+	struct ab8500_btemp, btemp_psy)
+
+/**
+ * struct ab8500_btemp_interrupts - ab8500 interrupts
+ * @name: name of the interrupt
+ * @isr:	function pointer to the isr
+ */
+struct ab8500_btemp_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_btemp_events {
+ bool batt_rem;
+ bool btemp_high;
+ bool btemp_medhigh;
+ bool btemp_lowmed;
+ bool btemp_low;
+ bool ac_conn;
+ bool usb_conn;
+};
+
+struct ab8500_btemp_ranges {
+ int btemp_high_limit;
+ int btemp_med_limit;
+ int btemp_low_limit;
+};
+
+/**
+ * struct ab8500_btemp - ab8500 BTEMP device information
+ * @dev: Pointer to the structure device
+ * @node: List of AB8500 BTEMPs, hence prepared for reentrance
+ * @chip_id: Chip-Id of the AB8500
+ * @curr_source: What current source we use, in uA
+ * @bat_temp:		Battery temperature in degrees Celsius
+ * @prev_bat_temp:	Last dispatched battery temperature
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @fg: Pointer to the struct fg
+ * @pdata: Pointer to the ab8500_btemp platform data
+ * @bat: Pointer to the ab8500_bm platform data
+ * @btemp_psy: Structure for BTEMP specific battery properties
+ * @events: Structure for information about events triggered
+ * @btemp_ranges: Battery temperature range structure
+ * @btemp_wq: Work queue for measuring the temperature periodically
+ * @btemp_periodic_work: Work for measuring the temperature periodically
+ */
+struct ab8500_btemp {
+ struct device *dev;
+ struct list_head node;
+ u8 chip_id;
+ int curr_source;
+ int bat_temp;
+ int prev_bat_temp;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_fg *fg;
+ struct ab8500_btemp_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct power_supply btemp_psy;
+ struct ab8500_btemp_events events;
+ struct ab8500_btemp_ranges btemp_ranges;
+ struct workqueue_struct *btemp_wq;
+ struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab8500_btemp_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab8500_btemp_list);
+
+/**
+ * ab8500_btemp_get() - returns a reference to the primary AB8500 BTEMP
+ * (i.e. the first BTEMP in the instance list)
+ */
+struct ab8500_btemp *ab8500_btemp_get(void)
+{
+ struct ab8500_btemp *btemp;
+ btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
+
+ return btemp;
+}
+
+/**
+ * ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
+ * @di: pointer to the ab8500_btemp structure
+ * @v_batctrl: measured batctrl voltage
+ * @inst_curr: measured instant current
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL voltage.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
+ int v_batctrl, int inst_curr)
+{
+ int rbs;
+
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ /*
+ * For ABB cut1.0 and 1.1 BAT_CTRL is internally
+ * connected to 1.8V through a 450k resistor
+ */
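+		/* v = 1800 * rbs / (rbs + 450k)  =>  rbs = 450k * v / (1800 - v) */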
+ rbs = (450000 * (v_batctrl)) / (1800 - v_batctrl);
+ break;
+ default:
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL) {
+ /*
+ * If the battery has internal NTC, we use the current
+ * source to calculate the resistance, 7uA or 20uA
+ */
+ rbs = (v_batctrl * 1000
+ - di->bat->gnd_lift_resistance * inst_curr)
+ / di->curr_source;
+ } else {
+ /*
+ * BAT_CTRL is internally
+ * connected to 1.8V through a 80k resistor
+ */
+ rbs = (80000 * (v_batctrl)) / (1800 - v_batctrl);
+ }
+ break;
+ }
+
+ return rbs;
+}
+
+/**
+ * ab8500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
+{
+ int vbtemp;
+ static int prev;
+
+ vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
+ if (vbtemp < 0) {
+ dev_err(di->dev,
+			"%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+ prev = vbtemp;
+ return vbtemp;
+}
+
+/**
+ * ab8500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di: pointer to the ab8500_btemp structure
+ * @enable: enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
+ bool enable)
+{
+ int curr;
+ int ret = 0;
+
+ /*
+ * BATCTRL current sources are included on AB8500 cut2.0
+ * and future versions
+ */
+ if (di->chip_id == AB8500_CUT1P0 || di->chip_id == AB8500_CUT1P1)
+ return 0;
+
+ /* Only do this for batteries with internal NTC */
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL && enable) {
+ if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+ curr = BAT_CTRL_7U_ENA;
+ else
+ curr = BAT_CTRL_20U_ENA;
+
+ dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed setting cmp_force\n",
+ __func__);
+ return ret;
+ }
+
+ /*
+ * We have to wait one 32kHz cycle before enabling
+ * the current source, since ForceBatCtrlCmpHigh needs
+ * to be written in a separate cycle
+ */
+ udelay(32);
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH | curr);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+ } else if (di->bat->adc_therm == ADC_THERM_BATCTRL && !enable) {
+ dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+
+ /* Enable Pull-Up and comparator */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling PU and comp\n",
+ __func__);
+ goto enable_pu_comp;
+ }
+
+ /*
+ * We have to wait one 32kHz cycle before disabling
+ * ForceBatCtrlCmpHigh since this needs to be written
+ * in a separate cycle
+ */
+ udelay(32);
+
+ /* Disable 'force comparator' */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ goto disable_force_comp;
+ }
+ }
+ return ret;
+
+ /*
+ * We have to try unsetting FORCE_BAT_CTRL_CMP_HIGH one more time
+ * if we got an error above
+ */
+disable_curr_source:
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ return ret;
+ }
+enable_pu_comp:
+ /* Enable Pull-Up and comparator */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling PU and comp\n",
+ __func__);
+ return ret;
+ }
+
+disable_force_comp:
+ /*
+ * We have to wait one 32kHz cycle before disabling
+ * ForceBatCtrlCmpHigh since this needs to be written
+ * in a separate cycle
+ */
+ udelay(32);
+
+ /* Disable 'force comparator' */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_res() - get battery resistance
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
+{
+ int ret;
+ int batctrl = 0;
+ int res;
+ int inst_curr;
+ int i = 0;
+
+ /*
+ * BATCTRL current sources are included on AB8500 cut2.0
+ * and future versions
+ */
+ ret = ab8500_btemp_curr_source_enable(di, true);
+ if (ret) {
+		dev_err(di->dev, "%s curr source enable failed\n", __func__);
+ return ret;
+ }
+
+ if (!di->fg)
+ di->fg = ab8500_fg_get();
+ if (!di->fg || ab8500_fg_inst_curr_nonblocking(di->fg, &inst_curr))
+ inst_curr = 0;
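+	/*
+	 * Poll until the non-blocking current measurement has completed
+	 * (inst_curr != INVALID_CURRENT), averaging BATCTRL readings meanwhile
+	 */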
+ do {
+ batctrl += ab8500_btemp_read_batctrl_voltage(di);
+ i++;
+ msleep(1);
+ barrier();
+ } while (inst_curr == INVALID_CURRENT);
+ batctrl /= i;
+ res = ab8500_btemp_batctrl_volt_to_res(di, batctrl, inst_curr);
+
+ ret = ab8500_btemp_curr_source_enable(di, false);
+ if (ret) {
+ dev_err(di->dev, "%s curr source disable failed\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(di->dev, "%s batctrl: %d res: %d inst_curr: %d\n",
+ __func__, batctrl, res, inst_curr);
+
+ return res;
+}
+
+/**
+ * ab8500_btemp_res_to_temp() - resistance to temperature
+ * @di: pointer to the ab8500_btemp structure
+ * @tbl:	pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @res: resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
+ const struct res_to_temp *tbl, int tbl_size, int res)
+{
+ int i, temp;
+ /*
+	 * Calculate the formula for the straight line:
+	 * simple interpolation if we are within the
+	 * resistance table limits, extrapolation if
+	 * resistance is outside the limits.
+ */
+ if (res > tbl[0].resist)
+ i = 0;
+ else if (res <= tbl[tbl_size - 1].resist)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(res <= tbl[i].resist &&
+ res > tbl[i + 1].resist))
+ i++;
+ }
+
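+	/* Linear interpolation: t = t0 + (t1 - t0) * (r - r0) / (r1 - r0) */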
+ temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+ return temp;
+}
+
+/**
+ * ab8500_btemp_measure_temp() - measure battery temperature
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
+{
+ int temp;
+ static int prev;
+ int rbat, rntc, vntc;
+ u8 id;
+
+ id = di->bat->batt_id;
+
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL &&
+ id != BATTERY_UNKNOWN) {
+
+ rbat = ab8500_btemp_get_batctrl_res(di);
+ if (rbat < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n",
+ __func__);
+ /*
+ * Return out-of-range temperature so that
+ * charging is stopped
+ */
+ return BTEMP_THERMAL_LOW_LIMIT;
+ }
+
+ temp = ab8500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ } else {
+ vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
+ if (vntc < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed,"
+ " using previous value\n", __func__);
+ return prev;
+ }
+ /*
+ * The PCB NTC is sourced from VTVOUT via a 230kOhm
+ * resistor.
+ */
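+		/* Voltage divider: vntc = VTVOUT * rntc / (rntc + 230k) */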
+ rntc = 230000 * vntc / (VTVOUT_V - vntc);
+
+ temp = ab8500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+ prev = temp;
+ }
+ dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+ return temp;
+}
+
+/**
+ * ab8500_btemp_id() - Identify the connected battery
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function will try to identify the battery by reading the ID
+ * resistor. Some brands combine the ID resistor with an NTC resistor so that
+ * the battery can both be identified and have its temperature read.
+ */
+static int ab8500_btemp_id(struct ab8500_btemp *di)
+{
+ int res;
+ u8 i;
+
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->bat->batt_id = BATTERY_UNKNOWN;
+
+ res = ab8500_btemp_get_batctrl_res(di);
+ if (res < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+ return -ENXIO;
+ }
+
+ /* BATTERY_UNKNOWN is defined on position 0, skip it! */
+ for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+ if ((res <= di->bat->bat_type[i].resis_high) &&
+ (res >= di->bat->bat_type[i].resis_low)) {
+ dev_dbg(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d"
+ " index: %d\n",
+ di->bat->adc_therm == ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bat->bat_type[i].resis_low, res,
+ di->bat->bat_type[i].resis_high, i);
+
+ di->bat->batt_id = i;
+ break;
+ }
+ }
+
+ if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ dev_warn(di->dev, "Battery identified as unknown"
+ ", resistance %d Ohm\n", res);
+ return -ENXIO;
+ }
+
+ /*
+ * We only have to change current source if the
+ * detected type is Type 1, else we use the 7uA source
+ */
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL && di->bat->batt_id == 1) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+ }
+
+ return di->bat->batt_id;
+}
+
+/**
+ * ab8500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab8500_btemp_periodic_work(struct work_struct *work)
+{
+ int interval;
+ struct ab8500_btemp *di = container_of(work,
+ struct ab8500_btemp, btemp_periodic_work.work);
+
+ di->bat_temp = ab8500_btemp_measure_temp(di);
+
+ if (di->bat_temp != di->prev_bat_temp) {
+ di->prev_bat_temp = di->bat_temp;
+ power_supply_changed(&di->btemp_psy);
+ }
+
+ if (di->events.ac_conn || di->events.usb_conn)
+ interval = di->bat->temp_interval_chg;
+ else
+ interval = di->bat->temp_interval_nochg;
+
+ /* Schedule a new measurement */
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(interval * HZ));
+}
+
+/**
+ * ab8500_btemp_batctrlindb_handler() - battery removal detected
+ * @irq: interrupt number
+ * @_di:	void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_batctrlindb_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+ dev_err(di->dev, "Battery removal detected!\n");
+
+ di->events.batt_rem = true;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_templow_handler() - battery temp lower than -10 degrees
+ * @irq: interrupt number
+ * @_di:	void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ case AB8500_CUT2P0:
+ dev_dbg(di->dev, "Ignore false btemp low irq"
+ " for ABB cut 1.0, 1.1 and 2.0\n");
+
+ break;
+ default:
+ dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
+
+ di->events.btemp_low = true;
+ di->events.btemp_high = false;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_lowmed = false;
+ power_supply_changed(&di->btemp_psy);
+
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_temphigh_handler() - battery temp higher than max temp
+ * @irq: interrupt number
+ * @_di:	void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_temphigh_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_crit(di->dev, "Battery temperature is higher than MAX temp\n");
+
+ di->events.btemp_high = true;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_lowmed = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_lowmed_handler() - battery temp between low and medium
+ * @irq: interrupt number
+ * @_di:	void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_lowmed_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_dbg(di->dev, "Battery temperature is between low and medium\n");
+
+ di->events.btemp_lowmed = true;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_high = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_medhigh_handler() - battery temp between medium and high
+ * @irq: interrupt number
+ * @_di:	void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_medhigh_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_dbg(di->dev, "Battery temperature is between medium and high\n");
+
+ di->events.btemp_medhigh = true;
+ di->events.btemp_lowmed = false;
+ di->events.btemp_high = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_periodic() - Periodic temperature measurements
+ * @di: pointer to the ab8500_btemp structure
+ * @enable: enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab8500_btemp_periodic(struct ab8500_btemp *di,
+ bool enable)
+{
+ dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+ enable);
+ /*
+ * Make sure a new measurement is done directly by cancelling
+ * any pending work
+ */
+ cancel_delayed_work_sync(&di->btemp_periodic_work);
+
+ if (enable)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+}
+
+/**
+ * ab8500_btemp_get_temp() - get battery temperature
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature
+ */
+static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
+{
+ int temp = 0;
+
+ /*
+	 * The BTEMP events are not reliable on AB8500 cut2.0
+ * and prior versions
+ */
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ case AB8500_CUT2P0:
+ temp = di->bat_temp * 10;
+
+ break;
+ default:
+ if (di->events.btemp_low) {
+ if (temp > di->btemp_ranges.btemp_low_limit)
+ temp = di->btemp_ranges.btemp_low_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_high) {
+ if (temp < di->btemp_ranges.btemp_high_limit)
+ temp = di->btemp_ranges.btemp_high_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_lowmed) {
+ if (temp > di->btemp_ranges.btemp_med_limit)
+ temp = di->btemp_ranges.btemp_med_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_medhigh) {
+ if (temp < di->btemp_ranges.btemp_med_limit)
+ temp = di->btemp_ranges.btemp_med_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else
+ temp = di->bat_temp * 10;
+
+ break;
+ }
+ return temp;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_temp() - get the temperature
+ * @btemp: pointer to the btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+ return btemp->bat_temp * 1000;
+}
+
+/**
+ * ab8500_btemp_get_property() - get the btemp properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online: presence of the battery
+ * present: presence of the battery
+ * technology: battery technology
+ * temp: battery temperature
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_btemp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_btemp *di;
+
+ di = to_ab8500_btemp_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (di->events.batt_rem)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = ab8500_btemp_get_temp(di);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_btemp *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_btemp_device_info(psy);
+
+ /*
+ * For all psy where the name of your driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval && di->events.ac_conn) {
+ di->events.ac_conn = false;
+ }
+ /* AC connected */
+ else if (ret.intval && !di->events.ac_conn) {
+ di->events.ac_conn = true;
+ if (!di->events.usb_conn)
+ ab8500_btemp_periodic(di, true);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval && di->events.usb_conn) {
+ di->events.usb_conn = false;
+ }
+ /* USB connected */
+ else if (ret.intval && !di->events.usb_conn) {
+ di->events.usb_conn = true;
+ if (!di->events.ac_conn)
+ ab8500_btemp_periodic(di, true);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_btemp_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function implements the external_power_changed callback of the
+ * power_supply structure.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab8500_btemp_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_btemp *di = to_ab8500_btemp_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+}
+
+/* ab8500 btemp driver interrupts and their respective isr */
+static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
+ {"BAT_CTRL_INDB", ab8500_btemp_batctrlindb_handler},
+ {"BTEMP_LOW", ab8500_btemp_templow_handler},
+ {"BTEMP_HIGH", ab8500_btemp_temphigh_handler},
+ {"BTEMP_LOW_MEDIUM", ab8500_btemp_lowmed_handler},
+ {"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
+};
+
+#if defined(CONFIG_PM)
+static int ab8500_btemp_resume(struct platform_device *pdev)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+ ab8500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static int ab8500_btemp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+ ab8500_btemp_periodic(di, false);
+
+ return 0;
+}
+#else
+#define ab8500_btemp_suspend NULL
+#define ab8500_btemp_resume NULL
+#endif
+
+static int __devexit ab8500_btemp_remove(struct platform_device *pdev)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->btemp_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
+{
+ int irq, i, ret = 0;
+ u8 val;
+ struct ab8500_platform_data *plat;
+
+ struct ab8500_btemp *di =
+ kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get();
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get btemp specific platform data */
+ if (!plat->btemp) {
+ dev_err(di->dev, "no btemp platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->btemp;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ /* BTEMP supply */
+ di->btemp_psy.name = "ab8500_btemp";
+ di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->btemp_psy.properties = ab8500_btemp_props;
+ di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
+ di->btemp_psy.get_property = ab8500_btemp_get_property;
+ di->btemp_psy.supplied_to = di->pdata->supplied_to;
+ di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.external_power_changed =
+ ab8500_btemp_external_power_changed;
+
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ create_singlethread_workqueue("ab8500_btemp_wq");
+	if (di->btemp_wq == NULL) {
+		dev_err(di->dev, "failed to create work queue\n");
+		ret = -ENOMEM;
+		goto free_device_info;
+	}
+
+ /* Init work for measuring temperature periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ ab8500_btemp_periodic_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_btemp_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB8500 CID is: 0x%02x\n",
+ di->chip_id);
+
+ /* Identify the battery */
+ if (ab8500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+
+ /* Set BTEMP thermal limits. Low and Med are fixed */
+ di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
+ di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_BTEMP_HIGH_TH, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ goto free_btemp_wq;
+ }
+ switch (val) {
+ case BTEMP_HIGH_TH_57_0:
+ case BTEMP_HIGH_TH_57_1:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_57;
+ break;
+ case BTEMP_HIGH_TH_52:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_52;
+ break;
+ case BTEMP_HIGH_TH_62:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_62;
+ break;
+ }
+
+ /* Register BTEMP power supply class */
+ ret = power_supply_register(di->dev, &di->btemp_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register BTEMP psy\n");
+ goto free_btemp_wq;
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_btemp_irq[i].name, di);
+
+ if (ret) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_btemp_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_btemp_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* Kick off periodic temperature measurements */
+ ab8500_btemp_periodic(di, true);
+ list_add_tail(&di->node, &ab8500_btemp_list);
+
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->btemp_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+free_btemp_wq:
+ destroy_workqueue(di->btemp_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_btemp_driver = {
+ .probe = ab8500_btemp_probe,
+ .remove = __devexit_p(ab8500_btemp_remove),
+ .suspend = ab8500_btemp_suspend,
+ .resume = ab8500_btemp_resume,
+ .driver = {
+ .name = "ab8500-btemp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_btemp_init(void)
+{
+ return platform_driver_register(&ab8500_btemp_driver);
+}
+
+static void __exit ab8500_btemp_exit(void)
+{
+ platform_driver_unregister(&ab8500_btemp_driver);
+}
+
+subsys_initcall_sync(ab8500_btemp_init);
+module_exit(ab8500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-btemp");
+MODULE_DESCRIPTION("AB8500 battery temperature driver");
diff --git a/drivers/power/ab8500_chargalg.c b/drivers/power/ab8500_chargalg.c
new file mode 100644
index 00000000000..97ea736424a
--- /dev/null
+++ b/drivers/power/ab8500_chargalg.c
@@ -0,0 +1,1961 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Charging algorithm driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/ab8500/ux500_chargalg.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/mfd/ab8500/gpadc.h>
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL (60 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT 10
+
+/* Recharge criteria counter */
+#define RCH_COND_CNT 3
+
+#define to_ab8500_chargalg_device_info(x) container_of((x), \
+	struct ab8500_chargalg, chargalg_psy)
+
+enum ab8500_chargers {
+ NO_CHG,
+ AC_CHG,
+ USB_CHG,
+};
+
+struct ab8500_chargalg_charger_info {
+ enum ab8500_chargers conn_chg;
+ enum ab8500_chargers prev_conn_chg;
+ enum ab8500_chargers online_chg;
+ enum ab8500_chargers prev_online_chg;
+ enum ab8500_chargers charger_type;
+ bool usb_chg_ok;
+ bool ac_chg_ok;
+ int usb_volt;
+ int usb_curr;
+ int ac_volt;
+ int ac_curr;
+ int usb_vset;
+ int usb_iset;
+ int ac_vset;
+ int ac_iset;
+};
+
+struct ab8500_chargalg_suspension_status {
+ bool suspended_change;
+ bool ac_suspended;
+ bool usb_suspended;
+};
+
+struct ab8500_chargalg_battery_data {
+ int temp;
+ int volt;
+ int avg_curr;
+ int inst_curr;
+ int percent;
+};
+
+enum ab8500_chargalg_states {
+ STATE_HANDHELD_INIT,
+ STATE_HANDHELD,
+ STATE_CHG_NOT_OK_INIT,
+ STATE_CHG_NOT_OK,
+ STATE_HW_TEMP_PROTECT_INIT,
+ STATE_HW_TEMP_PROTECT,
+ STATE_NORMAL_INIT,
+ STATE_NORMAL,
+ STATE_WAIT_FOR_RECHARGE_INIT,
+ STATE_WAIT_FOR_RECHARGE,
+ STATE_MAINTENANCE_A_INIT,
+ STATE_MAINTENANCE_A,
+ STATE_MAINTENANCE_B_INIT,
+ STATE_MAINTENANCE_B,
+ STATE_TEMP_UNDEROVER_INIT,
+ STATE_TEMP_UNDEROVER,
+ STATE_TEMP_LOWHIGH_INIT,
+ STATE_TEMP_LOWHIGH,
+ STATE_SUSPENDED_INIT,
+ STATE_SUSPENDED,
+ STATE_OVV_PROTECT_INIT,
+ STATE_OVV_PROTECT,
+ STATE_SAFETY_TIMER_EXPIRED_INIT,
+ STATE_SAFETY_TIMER_EXPIRED,
+ STATE_BATT_REMOVED_INIT,
+ STATE_BATT_REMOVED,
+ STATE_WD_EXPIRED_INIT,
+ STATE_WD_EXPIRED,
+};
+
+static const char *states[] = {
+ "HANDHELD_INIT",
+ "HANDHELD",
+ "CHG_NOT_OK_INIT",
+ "CHG_NOT_OK",
+ "HW_TEMP_PROTECT_INIT",
+ "HW_TEMP_PROTECT",
+ "NORMAL_INIT",
+ "NORMAL",
+ "WAIT_FOR_RECHARGE_INIT",
+ "WAIT_FOR_RECHARGE",
+ "MAINTENANCE_A_INIT",
+ "MAINTENANCE_A",
+ "MAINTENANCE_B_INIT",
+ "MAINTENANCE_B",
+ "TEMP_UNDEROVER_INIT",
+ "TEMP_UNDEROVER",
+ "TEMP_LOWHIGH_INIT",
+ "TEMP_LOWHIGH",
+ "SUSPENDED_INIT",
+ "SUSPENDED",
+ "OVV_PROTECT_INIT",
+ "OVV_PROTECT",
+ "SAFETY_TIMER_EXPIRED_INIT",
+ "SAFETY_TIMER_EXPIRED",
+ "BATT_REMOVED_INIT",
+ "BATT_REMOVED",
+ "WD_EXPIRED_INIT",
+ "WD_EXPIRED",
+};
+
+struct ab8500_chargalg_events {
+ bool batt_unknown;
+ bool mainextchnotok;
+ bool batt_ovv;
+ bool batt_rem;
+ bool btemp_underover;
+ bool btemp_lowhigh;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool main_ovv;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool safety_timer_expired;
+ bool maintenance_timer_expired;
+ bool ac_wd_expired;
+ bool usb_wd_expired;
+ bool ac_cv_active;
+ bool usb_cv_active;
+ bool vbus_collapsed;
+};
+
+/**
+ * struct ab8500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset: the non optimized/maximised charger current
+ * @current_iset: the charging current used at this moment
+ * @test_delta_i: the delta between the current we want to charge and the
+ *			current that is really going into the battery
+ * @condition_cnt: number of iterations needed before a new charger current
+ *			is set
+ * @max_current: maximum charger current
+ * @wait_cnt: to avoid too fast current step down in case of charger
+ * voltage collapse, we insert this delay between step
+ * down
+ * @level: tells in how many steps the charging current has been
+ *			increased
+ */
+struct ab8500_charge_curr_maximization {
+ int original_iset;
+ int current_iset;
+ int test_delta_i;
+ int condition_cnt;
+ int max_current;
+ int wait_cnt;
+ u8 level;
+};
+
+enum maxim_ret {
+ MAXIM_RET_NOACTION,
+ MAXIM_RET_CHANGE,
+ MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct ab8500_chargalg - ab8500 Charging algorithm device information
+ * @dev: pointer to the structure device
+ * @charge_status: battery operating status
+ * @eoc_cnt: counter used to determine end-of_charge
+ * @rch_cnt: counter used to determine start of recharge
+ * @maintenance_chg: indicate if maintenance charge is active
+ * @t_hyst_norm:	temperature hysteresis when the temperature has been
+ * over or under normal limits
+ * @t_hyst_lowhigh:	temperature hysteresis when the temperature has been
+ * over or under the high or low limits
+ * @charge_state: current state of the charging algorithm
+ * @ccm:		charging current maximization parameters
+ * @chg_info: information about connected charger types
+ * @batt_data: data of the battery
+ * @susp_status: current charger suspension status
+ * @parent: pointer to the struct ab8500
+ * @pdata: pointer to the ab8500_chargalg platform data
+ * @bat: pointer to the ab8500_bm platform data
+ * @chargalg_psy: structure that holds the battery properties exposed by
+ * the charging algorithm
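+ * @ac_chg:		pointer to the ux500_charger used for AC charging
+ * @usb_chg:		pointer to the ux500_charger used for USB charging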
+ * @events: structure for information about events triggered
+ * @chargalg_wq: work queue for running the charging algorithm
+ * @chargalg_periodic_work: work to run the charging algorithm periodically
+ * @chargalg_wd_work: work to kick the charger watchdog periodically
+ * @chargalg_work: work to run the charging algorithm instantly
+ * @safety_timer: charging safety timer
+ * @maintenance_timer: maintenance charging timer
+ * @chargalg_kobject: structure of type kobject
+ */
+struct ab8500_chargalg {
+ struct device *dev;
+ int charge_status;
+ int eoc_cnt;
+ int rch_cnt;
+ bool maintenance_chg;
+ int t_hyst_norm;
+ int t_hyst_lowhigh;
+ enum ab8500_chargalg_states charge_state;
+ struct ab8500_charge_curr_maximization ccm;
+ struct ab8500_chargalg_charger_info chg_info;
+ struct ab8500_chargalg_battery_data batt_data;
+ struct ab8500_chargalg_suspension_status susp_status;
+ struct ab8500 *parent;
+ struct ab8500_chargalg_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct power_supply chargalg_psy;
+ struct ux500_charger *ac_chg;
+ struct ux500_charger *usb_chg;
+ struct ab8500_chargalg_events events;
+ struct workqueue_struct *chargalg_wq;
+ struct delayed_work chargalg_periodic_work;
+ struct delayed_work chargalg_wd_work;
+ struct work_struct chargalg_work;
+ struct timer_list safety_timer;
+ struct timer_list maintenance_timer;
+ struct kobject chargalg_kobject;
+};
+
+/* Main battery properties */
+static enum power_supply_property ab8500_chargalg_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+/**
+ * ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @data: pointer to the ab8500_chargalg structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static void ab8500_chargalg_safety_timer_expired(unsigned long data)
+{
+ struct ab8500_chargalg *di = (struct ab8500_chargalg *) data;
+ dev_err(di->dev, "Safety timer expired\n");
+ di->events.safety_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @data:	pointer to the ab8500_chargalg structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static void ab8500_chargalg_maintenance_timer_expired(unsigned long data)
+{
+
+ struct ab8500_chargalg *di = (struct ab8500_chargalg *) data;
+ dev_dbg(di->dev, "Maintenance timer expired\n");
+ di->events.maintenance_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_state_to() - Change charge state
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void ab8500_chargalg_state_to(struct ab8500_chargalg *di,
+ enum ab8500_chargalg_states state)
+{
+ dev_dbg(di->dev,
+ "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+ di->charge_state == state ? "NO" : "YES",
+ di->charge_state,
+ states[di->charge_state],
+ state,
+ states[state]);
+
+ di->charge_state = state;
+}
+
+/**
+ * ab8500_chargalg_check_charger_connection() - Check charger connection change
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int ab8500_chargalg_check_charger_connection(struct ab8500_chargalg *di)
+{
+ if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+ di->susp_status.suspended_change) {
+ /*
+ * Charger state changed or suspension
+ * has changed since last update
+ */
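+		/* AC is evaluated first and therefore takes precedence over USB */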
+ if ((di->chg_info.conn_chg & AC_CHG) &&
+ !di->susp_status.ac_suspended) {
+ dev_dbg(di->dev, "Charging source is AC\n");
+ if (di->chg_info.charger_type != AC_CHG) {
+ di->chg_info.charger_type = AC_CHG;
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ } else if ((di->chg_info.conn_chg & USB_CHG) &&
+ !di->susp_status.usb_suspended) {
+ dev_dbg(di->dev, "Charging source is USB\n");
+ di->chg_info.charger_type = USB_CHG;
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else if (di->chg_info.conn_chg &&
+ (di->susp_status.ac_suspended ||
+ di->susp_status.usb_suspended)) {
+ dev_dbg(di->dev, "Charging is suspended\n");
+ di->chg_info.charger_type = NO_CHG;
+ ab8500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+ } else {
+ dev_dbg(di->dev, "Charging source is OFF\n");
+ di->chg_info.charger_type = NO_CHG;
+ ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+ di->susp_status.suspended_change = false;
+ }
+ return di->chg_info.conn_chg;
+}
+
+/**
+ * ab8500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void ab8500_chargalg_start_safety_timer(struct ab8500_chargalg *di)
+{
+ unsigned long timer_expiration = 0;
+
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->main_safety_tmr_h * 3600 * HZ));
+ break;
+
+ case USB_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->usb_safety_tmr_h * 3600 * HZ));
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+
+ di->events.safety_timer_expired = false;
+ di->safety_timer.expires = timer_expiration;
+ if (!timer_pending(&di->safety_timer))
+ add_timer(&di->safety_timer);
+ else
+ mod_timer(&di->safety_timer, timer_expiration);
+}
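+
+/*
+ * Illustrative example (platform values are hypothetical, not taken from
+ * any real battery table): with main_safety_tmr_h = 4 the safety timer is
+ * armed to fire roughly 4 * 3600 * HZ jiffies (4 hours) after a reportedly
+ * full battery keeps charging, see ab8500_chargalg_check_safety_timer().
+ */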
+
+/**
+ * ab8500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di)
+{
+ di->events.safety_timer_expired = false;
+ if (timer_pending(&di->safety_timer))
+ del_timer(&di->safety_timer);
+}
+
+/**
+ * ab8500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di: pointer to the ab8500_chargalg structure
+ * @duration: duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void ab8500_chargalg_start_maintenance_timer(struct ab8500_chargalg *di,
+ int duration)
+{
+ unsigned long timer_expiration;
+
+ /* Convert from hours to jiffies */
+ timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
+
+ di->events.maintenance_timer_expired = false;
+ di->maintenance_timer.expires = timer_expiration;
+ if (!timer_pending(&di->maintenance_timer))
+ add_timer(&di->maintenance_timer);
+ else
+ mod_timer(&di->maintenance_timer, timer_expiration);
+}
+
+/**
+ * ab8500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void ab8500_chargalg_stop_maintenance_timer(struct ab8500_chargalg *di)
+{
+ di->events.maintenance_timer_expired = false;
+ del_timer(&di->maintenance_timer);
+}
+
+/**
+ * ab8500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, otherwise the ABB will reset the system
+ */
+static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
+{
+ /* Check if charger exists and kick watchdog if charging */
+ if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+ di->chg_info.online_chg & AC_CHG)
+ return di->ac_chg->ops.kick_wd(di->ac_chg);
+ else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+ di->chg_info.online_chg & USB_CHG)
+ return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+ return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di: pointer to the ab8500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->ac_chg || !di->ac_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select the maximum that both the charger and the battery support */
+ if (di->ac_chg->max_out_volt)
+ vset = min(vset, di->ac_chg->max_out_volt);
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+ di->chg_info.ac_vset = vset;
+
+ return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di: pointer to the ab8500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_usb_en(struct ab8500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->usb_chg || !di->usb_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select the maximum that both the charger and the battery support */
+ if (di->usb_chg->max_out_volt)
+ vset = min(vset, di->usb_chg->max_out_volt);
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+ di->chg_info.usb_vset = vset;
+
+ return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_update_chg_curr() - Update charger current
+ * @di: pointer to the ab8500_chargalg structure
+ * @iset: requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
+ int iset)
+{
+ /* Check if charger exists and update current if charging */
+ if (di->ac_chg && di->ac_chg->ops.update_curr &&
+ di->chg_info.charger_type & AC_CHG) {
+ /*
+ * Select the maximum that both the charger
+ * and the battery support
+ */
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+
+ return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+ } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+ di->chg_info.charger_type & USB_CHG) {
+ /*
+ * Select the maximum that both the charger
+ * and the battery support
+ */
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+
+ return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+ }
+
+ return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_stop_charging() - Stop charging
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void ab8500_chargalg_stop_charging(struct ab8500_chargalg *di)
+{
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_hold_charging() - Pause charging
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void ab8500_chargalg_hold_charging(struct ab8500_chargalg *di)
+{
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_start_charging() - Start the charger
+ * @di: pointer to the ab8500_chargalg structure
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
+ int vset, int iset)
+{
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ dev_dbg(di->dev,
+ "AC parameters: Vset %d, Ich %d\n", vset, iset);
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_ac_en(di, true, vset, iset);
+ break;
+
+ case USB_CHG:
+ dev_dbg(di->dev,
+ "USB parameters: Vset %d, Ich %d\n", vset, iset);
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ ab8500_chargalg_usb_en(di, true, vset, iset);
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+}
+
+/**
+ * ab8500_chargalg_check_temp() - Check battery temperature ranges
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
+{
+ if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
+ di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+ /* Temp OK! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = 0;
+ } else {
+ if (((di->batt_data.temp >= di->bat->temp_high) &&
+ (di->batt_data.temp <
+ (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+ ((di->batt_data.temp >
+ (di->bat->temp_under + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= di->bat->temp_low))) {
+ /* TEMP minor!!!!! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = true;
+ di->t_hyst_norm = di->bat->temp_hysteresis;
+ di->t_hyst_lowhigh = 0;
+ } else if (di->batt_data.temp <= di->bat->temp_under ||
+ di->batt_data.temp >= di->bat->temp_over) {
+ /* TEMP major!!!!! */
+ di->events.btemp_underover = true;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+ } else {
+ /* Within hysteresis */
+ dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+ "hyst_lowhigh %d, hyst normal %d\n",
+ di->batt_data.temp, di->t_hyst_lowhigh,
+ di->t_hyst_norm);
+ }
+ }
+}
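+
+/*
+ * Example with hypothetical limits (not taken from any real battery data):
+ * with temp_under/low/high/over = 0/5/45/55 degC and temp_hysteresis = 2 degC,
+ * a reading of 50 degC sets btemp_lowhigh (charging at reduced levels) and
+ * the temperature must then drop below temp_high - hysteresis = 43 degC
+ * before normal charging is resumed.
+ */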
+
+/**
+ * ab8500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The charger voltage is checked against the maximum limit
+ */
+static void ab8500_chargalg_check_charger_voltage(struct ab8500_chargalg *di)
+{
+ if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+ di->chg_info.usb_chg_ok = false;
+ else
+ di->chg_info.usb_chg_ok = true;
+
+ if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+ di->chg_info.ac_chg_ok = false;
+ else
+ di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * ab8500_chargalg_end_of_charge() - Check if end-of-charge criteria are fulfilled
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * End-of-charge criteria are fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If so, the battery is full
+ */
+static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
+{
+ if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ di->charge_state == STATE_NORMAL &&
+ !di->maintenance_chg && (di->batt_data.volt >=
+ di->bat->bat_type[di->bat->batt_id].termination_vol ||
+ di->events.usb_cv_active || di->events.ac_cv_active) &&
+ di->batt_data.avg_curr <
+ di->bat->bat_type[di->bat->batt_id].termination_curr &&
+ di->batt_data.avg_curr > 0) {
+ if (++di->eoc_cnt >= EOC_COND_CNT) {
+ di->eoc_cnt = 0;
+ di->charge_status = POWER_SUPPLY_STATUS_FULL;
+ di->maintenance_chg = true;
+ dev_dbg(di->dev, "EOC reached!\n");
+ power_supply_changed(&di->chargalg_psy);
+ } else {
+ dev_dbg(di->dev,
+ " EOC limit reached for the %d"
+ " time, out of %d before EOC\n",
+ di->eoc_cnt,
+ EOC_COND_CNT);
+ }
+ } else {
+ di->eoc_cnt = 0;
+ }
+}
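+
+/*
+ * Example with hypothetical battery-table values: with termination_vol =
+ * 4200 mV and termination_curr = 100 mA, EOC is flagged once Vbat is at
+ * least 4200 mV (or the charger reports CV mode) while 0 < Iavg < 100 mA
+ * for EOC_COND_CNT consecutive runs of the algorithm.
+ */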
+
+static void init_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+ di->ccm.original_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.current_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
+ di->ccm.max_current = di->bat->maxi->chg_curr;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.level = 0;
+}
+
+/**
+ * ab8500_chargalg_chg_curr_maxim() - Increase the charger current to
+ * compensate for the system load
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
+{
+ int delta_i;
+
+ if (!di->bat->maxi->ena_maxi)
+ return MAXIM_RET_NOACTION;
+
+ delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+ if (di->events.vbus_collapsed) {
+ dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+ di->ccm.wait_cnt);
+ if (di->ccm.wait_cnt == 0) {
+ dev_dbg(di->dev, "lowering current\n");
+ di->ccm.wait_cnt++;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.max_current =
+ di->ccm.current_iset - di->ccm.test_delta_i;
+ di->ccm.current_iset = di->ccm.max_current;
+ di->ccm.level--;
+ return MAXIM_RET_CHANGE;
+ } else {
+ dev_dbg(di->dev, "waiting\n");
+ /* Let's go in here twice before lowering curr again */
+ di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+ return MAXIM_RET_NOACTION;
+ }
+ }
+
+ di->ccm.wait_cnt = 0;
+
+ if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
+ dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+ " (limit %dmA) (current iset: %dmA)!\n",
+ di->batt_data.inst_curr, di->ccm.original_iset,
+ di->ccm.current_iset);
+
+ if (di->ccm.current_iset == di->ccm.original_iset)
+ return MAXIM_RET_NOACTION;
+
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset = di->ccm.original_iset;
+ di->ccm.level = 0;
+
+ return MAXIM_RET_IBAT_TOO_HIGH;
+ }
+
+ if (delta_i > di->ccm.test_delta_i &&
+ (di->ccm.current_iset + di->ccm.test_delta_i) <
+ di->ccm.max_current) {
+ if (di->ccm.condition_cnt-- == 0) {
+ /* Increase iset by ccm.test_delta_i */
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset += di->ccm.test_delta_i;
+ di->ccm.level++;
+ dev_dbg(di->dev, " Maximization needed, increase"
+ " with %d mA to %dmA (Optimal ibat: %d)"
+ " Level %d\n",
+ di->ccm.test_delta_i,
+ di->ccm.current_iset,
+ di->ccm.original_iset,
+ di->ccm.level);
+ return MAXIM_RET_CHANGE;
+ } else {
+ return MAXIM_RET_NOACTION;
+ }
+ } else {
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ return MAXIM_RET_NOACTION;
+ }
+}
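+
+/*
+ * Illustrative run (numbers are hypothetical): with original_iset = 900 mA,
+ * test_delta_i = 100 mA and a measured inst_curr of 600 mA, delta_i is
+ * 300 mA > test_delta_i, so after roughly wait_cycles calls current_iset is
+ * stepped up 900 -> 1000 mA and so on, until inst_curr gets close to
+ * original_iset or max_current is reached. A collapsed VBUS instead steps
+ * current_iset down by test_delta_i.
+ */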
+
+static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+ enum maxim_ret ret;
+ int result;
+
+ ret = ab8500_chargalg_chg_curr_maxim(di);
+ switch (ret) {
+ case MAXIM_RET_CHANGE:
+ result = ab8500_chargalg_update_chg_curr(di,
+ di->ccm.current_iset);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+ case MAXIM_RET_IBAT_TOO_HIGH:
+ result = ab8500_chargalg_update_chg_curr(di,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+
+ case MAXIM_RET_NOACTION:
+ default:
+ /* Do nothing..*/
+ break;
+ }
+}
+
+static void ab8500_chargalg_check_safety_timer(struct ab8500_chargalg *di)
+{
+ /*
+ * The safety timer will not be started until the capacity reported
+ * from the FG algorithm is 100%. Then we know that the amount of
+ * charge that's gone into the battery is enough for the battery
+ * to be full. If it has not reached end-of-charge before the safety
+ * timer has expired then we know that the battery is overcharged
+ * and charging will be stopped to protect the battery.
+ */
+ if (di->batt_data.percent == 100 &&
+ !timer_pending(&di->safety_timer)) {
+ ab8500_chargalg_start_safety_timer(di);
+ dev_dbg(di->dev, "start safety timer\n");
+ } else if (di->batt_data.percent != 100 &&
+ timer_pending(&di->safety_timer)) {
+ ab8500_chargalg_stop_safety_timer(di);
+ dev_dbg(di->dev, "stop safety timer\n");
+ }
+}
+
+static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_chargalg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_chargalg_device_info(psy);
+
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ /* Initialize chargers if not already done */
+ if (!di->ac_chg &&
+ ext->type == POWER_SUPPLY_TYPE_MAINS)
+ di->ac_chg = psy_to_ux500_charger(ext);
+ else if (!di->usb_chg &&
+ ext->type == POWER_SUPPLY_TYPE_USB)
+ di->usb_chg = psy_to_ux500_charger(ext);
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ /* Battery present */
+ if (ret.intval)
+ di->events.batt_rem = false;
+ /* Battery removed */
+ else
+ di->events.batt_rem = true;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~AC_CHG;
+ }
+ /* AC connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= AC_CHG;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~USB_CHG;
+ }
+ /* USB connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= USB_CHG;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~AC_CHG;
+ }
+ /* AC online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= AC_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~USB_CHG;
+ }
+ /* USB online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= USB_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.mainextchnotok = true;
+ di->events.main_thermal_prot = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.ac_wd_expired = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.main_thermal_prot = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.main_thermal_prot = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.main_ovv = true;
+ di->events.mainextchnotok = false;
+ di->events.main_thermal_prot = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.main_thermal_prot = false;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_TYPE_USB:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.usbchargernotok = true;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.usb_wd_expired = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.usb_thermal_prot = true;
+ di->events.usbchargernotok = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.vbus_ovv = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_volt = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.ac_cv_active = true;
+ else
+ di->events.ac_cv_active = false;
+
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.usb_cv_active = true;
+ else
+ di->events.usb_cv_active = false;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->events.batt_unknown = false;
+ else
+ di->events.batt_unknown = true;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ di->batt_data.temp = ret.intval / 10;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.inst_curr = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.avg_curr = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ if (ret.intval)
+ di->events.vbus_collapsed = true;
+ else
+ di->events.vbus_collapsed = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ di->batt_data.percent = ret.intval;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is called through the external_power_changed callback
+ * of the power_supply structure whenever there is a change in any external
+ * power supply that this driver needs to be notified of.
+ */
+static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_chargalg *di = to_ab8500_chargalg_device_info(psy);
+
+ /*
+ * Trigger execution of the algorithm instantly and read
+ * all power_supply properties there instead
+ */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_algorithm() - Main function for the algorithm
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
+{
+ int charger_status;
+
+ /* Collect data from all power_supply class devices */
+ class_for_each_device(power_supply_class, NULL,
+ &di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+
+ ab8500_chargalg_end_of_charge(di);
+ ab8500_chargalg_check_temp(di);
+ ab8500_chargalg_check_charger_voltage(di);
+ charger_status = ab8500_chargalg_check_charger_connection(di);
+
+ /*
+ * First check if we have a charger connected.
+ * Also we don't allow charging of unknown batteries if configured
+ * this way
+ */
+ if (!charger_status ||
+ (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+ if (di->charge_state != STATE_HANDHELD) {
+ di->events.safety_timer_expired = false;
+ ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ }
+
+ /* If suspended, we should not continue checking the flags */
+ else if (di->charge_state == STATE_SUSPENDED_INIT ||
+ di->charge_state == STATE_SUSPENDED) {
+ /* We don't do anything here, just don't continue */
+ }
+
+ /* Safety timer expiration */
+ else if (di->events.safety_timer_expired) {
+ if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+ ab8500_chargalg_state_to(di,
+ STATE_SAFETY_TIMER_EXPIRED_INIT);
+ }
+ /*
+ * Check if any interrupts have occurred
+ * that will prevent us from charging
+ */
+
+ /* Battery removed */
+ else if (di->events.batt_rem) {
+ if (di->charge_state != STATE_BATT_REMOVED)
+ ab8500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+ }
+ /* Main or USB charger not ok. */
+ else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+ /*
+ * If vbus_collapsed is set, we have to lower the charger
+ * current, which is done in the normal state below
+ */
+ if (di->charge_state != STATE_CHG_NOT_OK &&
+ !di->events.vbus_collapsed)
+ ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+ }
+ /* VBUS, Main or VBAT OVV. */
+ else if (di->events.vbus_ovv ||
+ di->events.main_ovv ||
+ di->events.batt_ovv ||
+ !di->chg_info.usb_chg_ok ||
+ !di->chg_info.ac_chg_ok) {
+ if (di->charge_state != STATE_OVV_PROTECT)
+ ab8500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+ }
+ /* USB Thermal, stop charging */
+ else if (di->events.main_thermal_prot ||
+ di->events.usb_thermal_prot) {
+ if (di->charge_state != STATE_HW_TEMP_PROTECT)
+ ab8500_chargalg_state_to(di,
+ STATE_HW_TEMP_PROTECT_INIT);
+ }
+ /* Battery temp over/under */
+ else if (di->events.btemp_underover) {
+ if (di->charge_state != STATE_TEMP_UNDEROVER)
+ ab8500_chargalg_state_to(di,
+ STATE_TEMP_UNDEROVER_INIT);
+ }
+ /* Watchdog expired */
+ else if (di->events.ac_wd_expired ||
+ di->events.usb_wd_expired) {
+ if (di->charge_state != STATE_WD_EXPIRED)
+ ab8500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+ }
+ /* Battery temp high/low */
+ else if (di->events.btemp_lowhigh) {
+ if (di->charge_state != STATE_TEMP_LOWHIGH)
+ ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+ }
+
+ dev_dbg(di->dev,
+ "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+ "State %s Active_chg %d Chg_status %d AC %d USB %d "
+ "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+ "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+ di->batt_data.volt,
+ di->batt_data.avg_curr,
+ di->batt_data.inst_curr,
+ di->batt_data.temp,
+ di->batt_data.percent,
+ di->maintenance_chg,
+ states[di->charge_state],
+ di->chg_info.charger_type,
+ di->charge_status,
+ di->chg_info.conn_chg & AC_CHG,
+ di->chg_info.conn_chg & USB_CHG,
+ di->chg_info.online_chg & AC_CHG,
+ di->chg_info.online_chg & USB_CHG,
+ di->events.ac_cv_active,
+ di->events.usb_cv_active,
+ di->chg_info.ac_curr,
+ di->chg_info.usb_curr,
+ di->chg_info.ac_vset,
+ di->chg_info.ac_iset,
+ di->chg_info.usb_vset,
+ di->chg_info.usb_iset);
+
+ switch (di->charge_state) {
+ case STATE_HANDHELD_INIT:
+ ab8500_chargalg_stop_charging(di);
+ di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ ab8500_chargalg_state_to(di, STATE_HANDHELD);
+ /* Intentional fallthrough */
+
+ case STATE_HANDHELD:
+ break;
+
+ case STATE_SUSPENDED_INIT:
+ if (di->susp_status.ac_suspended)
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ if (di->susp_status.usb_suspended)
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ ab8500_chargalg_state_to(di, STATE_SUSPENDED);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_SUSPENDED:
+ /* CHARGING is suspended */
+ break;
+
+ case STATE_BATT_REMOVED_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_BATT_REMOVED);
+ /* Intentional fallthrough */
+
+ case STATE_BATT_REMOVED:
+ if (!di->events.batt_rem)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_HW_TEMP_PROTECT_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_HW_TEMP_PROTECT:
+ if (!di->events.main_thermal_prot &&
+ !di->events.usb_thermal_prot)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_OVV_PROTECT_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_OVV_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_OVV_PROTECT:
+ if (!di->events.vbus_ovv &&
+ !di->events.main_ovv &&
+ !di->events.batt_ovv &&
+ di->chg_info.usb_chg_ok &&
+ di->chg_info.ac_chg_ok)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_CHG_NOT_OK_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+ /* Intentional fallthrough */
+
+ case STATE_CHG_NOT_OK:
+ if (!di->events.mainextchnotok &&
+ !di->events.usbchargernotok)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_SAFETY_TIMER_EXPIRED_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_SAFETY_TIMER_EXPIRED:
+ /* We exit this state when charger is removed */
+ break;
+
+ case STATE_NORMAL_INIT:
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ ab8500_chargalg_state_to(di, STATE_NORMAL);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ init_maxim_chg_curr(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->eoc_cnt = 0;
+ di->maintenance_chg = false;
+ power_supply_changed(&di->chargalg_psy);
+
+ break;
+
+ case STATE_NORMAL:
+ handle_maxim_chg_curr(di);
+ if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+ di->maintenance_chg) {
+ if (di->bat->no_maintenance)
+ ab8500_chargalg_state_to(di,
+ STATE_WAIT_FOR_RECHARGE_INIT);
+ else
+ ab8500_chargalg_state_to(di,
+ STATE_MAINTENANCE_A_INIT);
+ }
+ /* Check whether we should start the safety timer or not */
+ ab8500_chargalg_check_safety_timer(di);
+ break;
+
+ /* This state will be used when the maintenance state is disabled */
+ case STATE_WAIT_FOR_RECHARGE_INIT:
+ ab8500_chargalg_hold_charging(di);
+ ab8500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+ di->rch_cnt = RCH_COND_CNT;
+ /* Intentional fallthrough */
+
+ case STATE_WAIT_FOR_RECHARGE:
+ if (di->batt_data.volt <=
+ di->bat->bat_type[di->bat->batt_id].recharge_vol) {
+ if (di->rch_cnt-- == 0)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else
+ di->rch_cnt = RCH_COND_CNT;
+ break;
+
+ case STATE_MAINTENANCE_A_INIT:
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_chg_timer_h);
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_cur_lvl);
+ ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough*/
+
+ case STATE_MAINTENANCE_A:
+ if (di->events.maintenance_timer_expired) {
+ ab8500_chargalg_stop_maintenance_timer(di);
+ ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+ }
+ break;
+
+ case STATE_MAINTENANCE_B_INIT:
+ ab8500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_chg_timer_h);
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_cur_lvl);
+ ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough*/
+
+ case STATE_MAINTENANCE_B:
+ if (di->events.maintenance_timer_expired) {
+ ab8500_chargalg_stop_maintenance_timer(di);
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ break;
+
+ case STATE_TEMP_LOWHIGH_INIT:
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_cur_lvl);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_LOWHIGH:
+ if (!di->events.btemp_lowhigh)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_WD_EXPIRED_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_WD_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_WD_EXPIRED:
+ if (!di->events.ac_wd_expired &&
+ !di->events.usb_wd_expired)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_TEMP_UNDEROVER_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_UNDEROVER:
+ if (!di->events.btemp_underover)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+ }
+
+ /* Start charging directly if the new state is a charge state */
+ if (di->charge_state == STATE_NORMAL_INIT ||
+ di->charge_state == STATE_MAINTENANCE_A_INIT ||
+ di->charge_state == STATE_MAINTENANCE_B_INIT)
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void ab8500_chargalg_periodic_work(struct work_struct *work)
+{
+ struct ab8500_chargalg *di = container_of(work,
+ struct ab8500_chargalg, chargalg_periodic_work.work);
+
+ ab8500_chargalg_algorithm(di);
+
+ /*
+ * If a charger is connected then the battery has to be monitored
+ * frequently, else the work can be delayed.
+ */
+ if (di->chg_info.conn_chg)
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_charging * HZ);
+ else
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_not_charging * HZ);
+}
+
+/**
+ * ab8500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void ab8500_chargalg_wd_work(struct work_struct *work)
+{
+ int ret;
+ struct ab8500_chargalg *di = container_of(work,
+ struct ab8500_chargalg, chargalg_wd_work.work);
+
+ dev_dbg(di->dev, "ab8500_chargalg_wd_work\n");
+
+ ret = ab8500_chargalg_kick_watchdog(di);
+ if (ret < 0)
+ dev_err(di->dev, "failed to kick watchdog\n");
+
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * ab8500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void ab8500_chargalg_work(struct work_struct *work)
+{
+ struct ab8500_chargalg *di = container_of(work,
+ struct ab8500_chargalg, chargalg_work);
+
+ ab8500_chargalg_algorithm(di);
+}
+
+/**
+ * ab8500_chargalg_get_property() - get the chargalg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status: charging/discharging/full/unknown
+ * health: health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_chargalg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_chargalg *di;
+
+ di = to_ab8500_chargalg_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = di->charge_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->events.batt_ovv) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else if (di->events.btemp_underover) {
+ if (di->batt_data.temp <= di->bat->temp_under)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else {
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * ab8500_chargalg_sysfs_charger() - sysfs store operations
+ * @kobj: pointer to the struct kobject
+ * @attr: pointer to the struct attribute
+ * @buf: buffer that holds the parameter passed from userspace
+ * @length: length of the parameter passed
+ *
+ * The operation to be performed when parameters are passed from user space.
+ * Returns the length of the buffer (input taken from user space) on success,
+ * else an error code on failure.
+ */
+static ssize_t ab8500_chargalg_sysfs_charger(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t length)
+{
+ struct ab8500_chargalg *di = container_of(kobj,
+ struct ab8500_chargalg, chargalg_kobject);
+ long int param;
+ int ac_usb;
+ int ret;
+ char entry = *attr->name;
+
+ switch (entry) {
+ case 'c':
+ ret = strict_strtol(buf, 10, &param);
+ if (ret < 0)
+ return ret;
+
+ ac_usb = param;
+ switch (ac_usb) {
+ case 0:
+ /* Disable charging */
+ di->susp_status.ac_suspended = true;
+ di->susp_status.usb_suspended = true;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 1:
+ /* Enable AC Charging */
+ di->susp_status.ac_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 2:
+ /* Enable USB charging */
+ di->susp_status.usb_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ default:
+ dev_info(di->dev, "Wrong input\n"
+ "Enter 0. Disable AC/USB Charging\n"
+ "1. Enable AC charging\n"
+ "2. Enable USB Charging\n");
+ }
+ break;
+ }
+ return strlen(buf);
+}
+
+static struct attribute ab8500_chargalg_en_charger = \
+{
+ .name = "chargalg",
+ .mode = S_IWUGO,
+};
+
+static struct attribute *ab8500_chargalg_chg[] = {
+ &ab8500_chargalg_en_charger,
+ NULL
+};
+
+const struct sysfs_ops ab8500_chargalg_sysfs_ops = {
+ .store = ab8500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type ab8500_chargalg_ktype = {
+ .sysfs_ops = &ab8500_chargalg_sysfs_ops,
+ .default_attrs = ab8500_chargalg_chg,
+};
+
+/**
+ * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct ab8500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_chargalg_sysfs_exit(struct ab8500_chargalg *di)
+{
+ kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct ab8500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_chargalg_sysfs_init(struct ab8500_chargalg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->chargalg_kobject,
+ &ab8500_chargalg_ktype,
+ NULL, "ab8500_chargalg");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
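+
+/*
+ * Usage sketch from user space (path assumes the kobject is registered at
+ * the sysfs root as "ab8500_chargalg", as done in
+ * ab8500_chargalg_sysfs_init() above):
+ *
+ *   echo 0 > /sys/ab8500_chargalg/chargalg   # suspend AC and USB charging
+ *   echo 1 > /sys/ab8500_chargalg/chargalg   # re-enable AC charging
+ *   echo 2 > /sys/ab8500_chargalg/chargalg   # re-enable USB charging
+ */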
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int ab8500_chargalg_resume(struct platform_device *pdev)
+{
+ struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* Kick charger watchdog if charging (any charger online) */
+ if (di->chg_info.online_chg)
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+ /*
+ * Run the charging algorithm directly to be sure we don't
+ * do it too seldom
+ */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ return 0;
+}
+
+static int ab8500_chargalg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+ if (di->chg_info.online_chg)
+ cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+ cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+ return 0;
+}
+#else
+#define ab8500_chargalg_suspend NULL
+#define ab8500_chargalg_resume NULL
+#endif
+
+static int __devexit ab8500_chargalg_remove(struct platform_device *pdev)
+{
+ struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ab8500_chargalg_sysfs_exit(di);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->chargalg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->chargalg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_chargalg_probe(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *plat;
+ int ret = 0;
+
+ struct ab8500_chargalg *di =
+ kzalloc(sizeof(struct ab8500_chargalg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get chargalg specific platform data */
+ if (!plat->chargalg) {
+ dev_err(di->dev, "no chargalg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->chargalg;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ /* chargalg supply */
+ di->chargalg_psy.name = "ab8500_chargalg";
+ di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->chargalg_psy.properties = ab8500_chargalg_props;
+ di->chargalg_psy.num_properties = ARRAY_SIZE(ab8500_chargalg_props);
+ di->chargalg_psy.get_property = ab8500_chargalg_get_property;
+ di->chargalg_psy.supplied_to = di->pdata->supplied_to;
+ di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.external_power_changed =
+ ab8500_chargalg_external_power_changed;
+
+ /* Initialize safety timer */
+ init_timer(&di->safety_timer);
+ di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+ di->safety_timer.data = (unsigned long) di;
+
+ /* Initialize maintenance timer */
+ init_timer(&di->maintenance_timer);
+ di->maintenance_timer.function =
+ ab8500_chargalg_maintenance_timer_expired;
+ di->maintenance_timer.data = (unsigned long) di;
+
+ /* Create a work queue for the chargalg */
+ di->chargalg_wq =
+ create_singlethread_workqueue("ab8500_chargalg_wq");
+ if (di->chargalg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ goto free_device_info;
+ }
+
+ /* Init periodic and watchdog delayed work for chargalg */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+ ab8500_chargalg_periodic_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+ ab8500_chargalg_wd_work);
+
+ /* Init work for running the chargalg instantly */
+ INIT_WORK(&di->chargalg_work, ab8500_chargalg_work);
+
+ /* To detect charger at startup */
+ di->chg_info.prev_conn_chg = -1;
+
+ /* Register chargalg power supply class */
+ ret = power_supply_register(di->dev, &di->chargalg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register chargalg psy\n");
+ goto free_chargalg_wq;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ret = ab8500_chargalg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_psy;
+ }
+
+ /* Run the charging algorithm */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+ return ret;
+
+free_psy:
+ power_supply_unregister(&di->chargalg_psy);
+free_chargalg_wq:
+ destroy_workqueue(di->chargalg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_chargalg_driver = {
+ .probe = ab8500_chargalg_probe,
+ .remove = __devexit_p(ab8500_chargalg_remove),
+ .suspend = ab8500_chargalg_suspend,
+ .resume = ab8500_chargalg_resume,
+ .driver = {
+ .name = "ab8500-chargalg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_chargalg_init(void)
+{
+ return platform_driver_register(&ab8500_chargalg_driver);
+}
+
+static void __exit ab8500_chargalg_exit(void)
+{
+ platform_driver_unregister(&ab8500_chargalg_driver);
+}
+
+module_init(ab8500_chargalg_init);
+module_exit(ab8500_chargalg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-chargalg");
+MODULE_DESCRIPTION("AB8500 charging algorithm driver");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
new file mode 100644
index 00000000000..1b827a980ce
--- /dev/null
+++ b/drivers/power/ab8500_charger.c
@@ -0,0 +1,2698 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Charger driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/mfd/ab8500/gpadc.h>
+#include <linux/mfd/ab8500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+
+/* Charger constants */
+#define NO_PW_CONN 0
+#define AC_PW_CONN 1
+#define USB_PW_CONN 2
+
+#define MAIN_WDOG_ENA 0x01
+#define MAIN_WDOG_KICK 0x02
+#define MAIN_WDOG_DIS 0x00
+#define CHARG_WD_KICK 0x01
+#define MAIN_CH_ENA 0x01
+#define MAIN_CH_NO_OVERSHOOT_ENA_N 0x02
+#define USB_CH_ENA 0x01
+#define USB_CHG_NO_OVERSHOOT_ENA_N 0x02
+#define MAIN_CH_DET 0x01
+#define MAIN_CH_CV_ON 0x04
+#define USB_CH_CV_ON 0x08
+#define VBUS_DET_DBNC100 0x02
+#define VBUS_DET_DBNC1 0x01
+#define OTP_ENABLE_WD 0x01
+
+#define MAIN_CH_INPUT_CURR_SHIFT 4
+#define VBUS_IN_CURR_LIM_SHIFT 4
+
+#define LED_INDICATOR_PWM_ENA 0x01
+#define LED_INDICATOR_PWM_DIS 0x00
+#define LED_IND_CUR_5MA 0x04
+#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
+
+/* HW failure constants */
+#define MAIN_CH_TH_PROT 0x02
+#define VBUS_CH_NOK 0x08
+#define USB_CH_TH_PROT 0x02
+#define VBUS_OVV_TH 0x01
+#define MAIN_CH_NOK 0x01
+#define VBUS_DET 0x80
+
+/* UsbLineStatus register bit masks */
+#define AB8500_USB_LINK_STATUS 0x78
+#define AB8500_STD_HOST_SUSP 0x18
+
+/* Watchdog timeout constant */
+#define WD_TIMER 0x30 /* 4min */
+#define WD_KICK_INTERVAL (60 * HZ)
+
+/* Lowest charger voltage is 3.39V -> 0x4E */
+#define LOW_VOLT_REG 0x4E
+
+/* UsbLineStatus register - usb types */
+enum ab8500_charger_link_status {
+ USB_STAT_NOT_CONFIGURED,
+ USB_STAT_STD_HOST_NC,
+ USB_STAT_STD_HOST_C_NS,
+ USB_STAT_STD_HOST_C_S,
+ USB_STAT_HOST_CHG_NM,
+ USB_STAT_HOST_CHG_HS,
+ USB_STAT_HOST_CHG_HS_CHIRP,
+ USB_STAT_DEDICATED_CHG,
+ USB_STAT_ACA_RID_A,
+ USB_STAT_ACA_RID_B,
+ USB_STAT_ACA_RID_C_NM,
+ USB_STAT_ACA_RID_C_HS,
+ USB_STAT_ACA_RID_C_HS_CHIRP,
+ USB_STAT_HM_IDGND,
+ USB_STAT_RESERVED,
+ USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab8500_usb_state {
+ AB8500_BM_USB_STATE_RESET_HS, /* HighSpeed Reset */
+ AB8500_BM_USB_STATE_RESET_FS, /* FullSpeed/LowSpeed Reset */
+ AB8500_BM_USB_STATE_CONFIGURED,
+ AB8500_BM_USB_STATE_SUSPEND,
+ AB8500_BM_USB_STATE_RESUME,
+ AB8500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB8500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05 50
+#define USB_CH_IP_CUR_LVL_0P09 98
+#define USB_CH_IP_CUR_LVL_0P19 193
+#define USB_CH_IP_CUR_LVL_0P29 290
+#define USB_CH_IP_CUR_LVL_0P38 380
+#define USB_CH_IP_CUR_LVL_0P45 450
+#define USB_CH_IP_CUR_LVL_0P5 500
+#define USB_CH_IP_CUR_LVL_0P6 600
+#define USB_CH_IP_CUR_LVL_0P7 700
+#define USB_CH_IP_CUR_LVL_0P8 800
+#define USB_CH_IP_CUR_LVL_0P9 900
+#define USB_CH_IP_CUR_LVL_1P0 1000
+#define USB_CH_IP_CUR_LVL_1P1 1100
+#define USB_CH_IP_CUR_LVL_1P3 1300
+#define USB_CH_IP_CUR_LVL_1P4 1400
+#define USB_CH_IP_CUR_LVL_1P5 1500
+
+#define VBAT_3700 3700
+
+#define to_ab8500_charger_usb_device_info(x) container_of((x), \
+ struct ab8500_charger, usb_chg)
+#define to_ab8500_charger_ac_device_info(x) container_of((x), \
+ struct ab8500_charger, ac_chg)
+
+/**
+ * struct ab8500_charger_interrupts - ab8500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab8500_charger_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_charger_info {
+ int charger_connected;
+ int charger_online;
+ int charger_voltage;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct ab8500_charger_event_flags {
+ bool mainextchnotok;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool chgwdexp;
+ bool vbus_collapse;
+};
+
+struct ab8500_charger_usb_state {
+ bool usb_changed;
+ int usb_current;
+ enum ab8500_usb_state state;
+ spinlock_t usb_lock;
+};
+
+/**
+ * struct ab8500_charger - ab8500 Charger device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the AB8500
+ * @max_usb_in_curr: Max USB charger input current
+ * @vbus_detected: VBUS detected
+ * @vbus_detected_start:
+ * VBUS detected during startup
+ * @ac_conn: This will be true when the AC charger has been plugged
+ * @vddadc_en: Indicate if VDD ADC supply is enabled from this driver
+ * @vbat: Battery voltage
+ * @old_vbat: Previously measured battery voltage
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab8500_charger platform data
+ * @bat: Pointer to the ab8500_bm platform data
+ * @flags: Structure for information about events triggered
+ * @usb_state: Structure for usb stack information
+ * @ac_chg: AC charger power supply
+ * @usb_chg: USB charger power supply
+ * @ac: Structure that holds the AC charger properties
+ * @usb: Structure that holds the USB charger properties
+ * @regu: Pointer to the struct regulator
+ * @charger_wq: Work queue for the IRQs and checking HW state
+ * @check_vbat_work: Work for checking vbat threshold to adjust vbus current
+ * @check_hw_failure_work: Work for checking HW state
+ * @check_usbchgnotok_work: Work for checking USB charger not ok status
+ * @kick_wd_work: Work for kicking the charger watchdog in case
+ * of ABB rev 1.* due to the watchdog logic bug
+ * @ac_work: Work for checking AC charger connection
+ * @detect_usb_type_work: Work for detecting the USB type connected
+ * @usb_link_status_work: Work for checking the new USB link status
+ * @usb_state_changed_work: Work for checking USB state
+ * @check_main_thermal_prot_work:
+ * Work for checking Main thermal status
+ * @check_usb_thermal_prot_work:
+ * Work for checking USB thermal status
+ */
+struct ab8500_charger {
+ struct device *dev;
+ u8 chip_id;
+ int max_usb_in_curr;
+ bool vbus_detected;
+ bool vbus_detected_start;
+ bool ac_conn;
+ bool vddadc_en;
+ int vbat;
+ int old_vbat;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_charger_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct ab8500_charger_event_flags flags;
+ struct ab8500_charger_usb_state usb_state;
+ struct ux500_charger ac_chg;
+ struct ux500_charger usb_chg;
+ struct ab8500_charger_info ac;
+ struct ab8500_charger_info usb;
+ struct regulator *regu;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_vbat_work;
+ struct delayed_work check_hw_failure_work;
+ struct delayed_work check_usbchgnotok_work;
+ struct delayed_work kick_wd_work;
+ struct work_struct ac_work;
+ struct work_struct detect_usb_type_work;
+ struct work_struct usb_link_status_work;
+ struct work_struct usb_state_changed_work;
+ struct work_struct check_main_thermal_prot_work;
+ struct work_struct check_usb_thermal_prot_work;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+};
+
+/* AC properties */
+static enum power_supply_property ab8500_charger_ac_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/* USB properties */
+static enum power_supply_property ab8500_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab8500_charger_get_ac_voltage() - get ac charger voltage
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger voltage (on success)
+ */
+static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->ac.charger_connected) {
+ vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab8500_charger_ac_cv() - check if the main charger is in CV mode
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger CV mode (on success) else error code
+ */
+static int ab8500_charger_ac_cv(struct ab8500_charger *di)
+{
+ u8 val;
+ int ret = 0;
+
+ /* Only check CV mode if the charger is online */
+ if (di->ac.charger_online) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_STATUS1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return 0;
+ }
+
+ if (val & MAIN_CH_CV_ON)
+ ret = 1;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_get_vbus_voltage() - get vbus voltage
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->usb.charger_connected) {
+ vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab8500_charger_get_usb_current() - get usb charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->usb.charger_online) {
+ ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab8500_charger_get_ac_current() - get ac charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the ac charger current.
+ * Returns ac current (on success) and error code on failure.
+ */
+static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->ac.charger_online) {
+ ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab8500_charger_usb_cv() - check if the usb charger is in CV mode
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns usb charger CV mode (on success) else error code
+ */
+static int ab8500_charger_usb_cv(struct ab8500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ /* Only check CV mode if the charger is online */
+ if (di->usb.charger_online) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return 0;
+ }
+
+ if (val & USB_CH_CV_ON)
+ ret = 1;
+ else
+ ret = 0;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_detect_chargers() - Detect the connected chargers
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB it does not mean that we can actually charge from it,
+ * only that there is a USB cable connected that we have to
+ * identify. This is used during startup when we don't get
+ * charger detection interrupts.
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN no power supply is connected
+ * AC_PW_CONN if the AC power supply is connected
+ * USB_PW_CONN if the USB power supply is connected
+ * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
+ */
+static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
+{
+ int result = NO_PW_CONN;
+ int ret;
+ u8 val;
+
+ /* Check for AC charger */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_STATUS1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ goto out;
+ }
+
+ if (val & MAIN_CH_DET)
+ result = AC_PW_CONN;
+
+ /* Check for USB charger */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ goto out;
+ }
+
+ if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
+ result |= USB_PW_CONN;
+
+ /*
+ * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+ * will be triggered every time we enable the VDD ADC supply.
+ * This will turn off charging for a short while.
+ * It can be avoided by keeping the supply on when
+ * there is a charger connected. Normally the VDD ADC supply
+ * is enabled every time a GPADC conversion is triggered. We will
+ * force it to be enabled from this driver to keep
+ * the GPADC module independent of the AB8500 chargers
+ */
+ if (result == NO_PW_CONN && di->vddadc_en) {
+ regulator_disable(di->regu);
+ di->vddadc_en = false;
+ } else if ((result & AC_PW_CONN || result & USB_PW_CONN) &&
+ !di->vddadc_en) {
+ regulator_enable(di->regu);
+ di->vddadc_en = true;
+ }
+
+ return result;
+
+out:
+ if (di->vddadc_en) {
+ regulator_disable(di->regu);
+ di->vddadc_en = false;
+ }
+ return ret;
+}
+
+/**
+ * ab8500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di: pointer to the ab8500_charger structure
+ * @link_status: the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
+ enum ab8500_charger_link_status link_status)
+{
+ int ret = 0;
+
+ switch (link_status) {
+ case USB_STAT_STD_HOST_NC:
+ case USB_STAT_STD_HOST_C_NS:
+ case USB_STAT_STD_HOST_C_S:
+ dev_dbg(di->dev, "USB Type - Standard host is "
+ "detected through USB driver\n");
+ ret = -1;
+ break;
+ case USB_STAT_HOST_CHG_HS_CHIRP:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ case USB_STAT_HOST_CHG_HS:
+ case USB_STAT_ACA_RID_C_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ break;
+ case USB_STAT_ACA_RID_A:
+ /*
+ * Dedicated charger level minus maximum current accessory
+ * can consume (300mA). Closest level is 1100mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ break;
+ case USB_STAT_ACA_RID_B:
+ /*
+ * Dedicated charger level minus 120mA (20mA for ACA and
+ * 100mA for potential accessory). Closest level is 1300mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ case USB_STAT_HOST_CHG_NM:
+ case USB_STAT_ACA_RID_C_HS_CHIRP:
+ case USB_STAT_ACA_RID_C_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ case USB_STAT_RESERVED:
+ /*
+ * This state is used to indicate that VBUS has dropped below
+ * the detection level 4 times in a row. This is due to the
+ * charger output current being set too high, making the charger
+ * voltage collapse. This has to be propagated through to
+ * chargalg. This is done using the property
+ * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+ */
+ di->flags.vbus_collapse = true;
+ dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -1;
+ break;
+ case USB_STAT_HM_IDGND:
+ case USB_STAT_NOT_CONFIGURED:
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ default:
+ dev_err(di->dev, "USB Type - Unknown\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ }
+
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d\n",
+ link_status, di->max_usb_in_curr);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_read_usb_type() - read the type of usb connected
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ /* get the USB type */
+ val = (val & AB8500_USB_LINK_STATUS) >> 3;
+ ret = ab8500_charger_max_usb_curr(di,
+ (enum ab8500_charger_link_status) val);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_detect_usb_type() - get the type of usb connected
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
+{
+ int i, ret;
+ u8 val;
+
+ /*
+ * On getting the VBUS rising edge detect interrupt there
+ * is a 250ms delay after which the register UsbLineStatus
+ * is filled with valid data.
+ */
+ for (i = 0; i < 10; i++) {
+ msleep(250);
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
+ &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ /*
+ * The UsbLineStatus register is not updated until the IT
+ * source register has been read, hence the read above.
+ * Revisit this.
+ */
+
+ /* get the USB type */
+ val = (val & AB8500_USB_LINK_STATUS) >> 3;
+ if (val)
+ break;
+ }
+ ret = ab8500_charger_max_usb_curr(di,
+ (enum ab8500_charger_link_status) val);
+
+ return ret;
+}
+
+/*
+ * This array maps the raw hex value to charger voltage used by the AB8500
+ * Values taken from the UM0836
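+ * (25 mV steps from 3500 mV to 4050 mV, then 10 mV steps up to 4600 mV)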
+ */
+static int ab8500_charger_voltage_map[] = {
+ 3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+ 3700, 3725, 3750, 3775, 3800, 3825, 3850, 3875,
+ 3900, 3925, 3950, 3975, 4000, 4025, 4050, 4060,
+ 4070, 4080, 4090, 4100, 4110, 4120, 4130, 4140,
+ 4150, 4160, 4170, 4180, 4190, 4200, 4210, 4220,
+ 4230, 4240, 4250, 4260, 4270, 4280, 4290, 4300,
+ 4310, 4320, 4330, 4340, 4350, 4360, 4370, 4380,
+ 4390, 4400, 4410, 4420, 4430, 4440, 4450, 4460,
+ 4470, 4480, 4490, 4500, 4510, 4520, 4530, 4540,
+ 4550, 4560, 4570, 4580, 4590, 4600,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_current_map[] = {
+ 100, 200, 300, 400, 500, 600, 700, 800,
+ 900, 1000, 1100, 1200, 1300, 1400, 1500,
+};
+
+/*
+ * This array maps the raw hex value to VBUS input current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_vbus_in_curr_map[] = {
+ USB_CH_IP_CUR_LVL_0P05,
+ USB_CH_IP_CUR_LVL_0P09,
+ USB_CH_IP_CUR_LVL_0P19,
+ USB_CH_IP_CUR_LVL_0P29,
+ USB_CH_IP_CUR_LVL_0P38,
+ USB_CH_IP_CUR_LVL_0P45,
+ USB_CH_IP_CUR_LVL_0P5,
+ USB_CH_IP_CUR_LVL_0P6,
+ USB_CH_IP_CUR_LVL_0P7,
+ USB_CH_IP_CUR_LVL_0P8,
+ USB_CH_IP_CUR_LVL_0P9,
+ USB_CH_IP_CUR_LVL_1P0,
+ USB_CH_IP_CUR_LVL_1P1,
+ USB_CH_IP_CUR_LVL_1P3,
+ USB_CH_IP_CUR_LVL_1P4,
+ USB_CH_IP_CUR_LVL_1P5,
+};
+
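+/*
+ * The helpers below translate a requested voltage/current (in mV/mA) into
+ * the index of the closest map entry that does not exceed the request;
+ * that index is what is written to the AB8500 register. Values above the
+ * highest entry in a map are rejected.
+ */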
+static int ab8500_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.5V */
+ if (voltage < ab8500_charger_voltage_map[0])
+ return LOW_VOLT_REG;
+
+ for (i = 1; i < ARRAY_SIZE(ab8500_charger_voltage_map); i++) {
+ if (voltage < ab8500_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ /* Only the maximum voltage in the map is a valid match here */
+ i = ARRAY_SIZE(ab8500_charger_voltage_map) - 1;
+ if (voltage == ab8500_charger_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab8500_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab8500_charger_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_current_map); i++) {
+ if (curr < ab8500_charger_current_map[i])
+ return i - 1;
+ }
+
+ /* Only the maximum current in the map is a valid match here */
+ i = ARRAY_SIZE(ab8500_charger_current_map) - 1;
+ if (curr == ab8500_charger_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab8500_vbus_in_curr_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab8500_charger_vbus_in_curr_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_vbus_in_curr_map); i++) {
+ if (curr < ab8500_charger_vbus_in_curr_map[i])
+ return i - 1;
+ }
+
+ /* Only the maximum current in the map is a valid match here */
+ i = ARRAY_SIZE(ab8500_charger_vbus_in_curr_map) - 1;
+ if (curr == ab8500_charger_vbus_in_curr_map[i])
+ return i;
+ else
+ return -1;
+}
+
+/**
+ * ab8500_charger_get_usb_cur() - get usb current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
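+ * For example, a 500 mA grant from the USB stack is mapped to
+ * USB_CH_IP_CUR_LVL_0P5.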
+ */
+static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
+{
+ switch (di->usb_state.usb_current) {
+ case 100:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case 200:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+ break;
+ case 300:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+ break;
+ case 400:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+ break;
+ case 500:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ default:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di: pointer to the ab8500_charger structure
+ * @ich_in: charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
+ int ich_in)
+{
+ int ret;
+ int input_curr_index;
+ int min_value;
+
+ /* We should always use the lowest current limit */
+ min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
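+ /*
+ * Due to the ASIC bug handled by ab8500_charger_check_vbat_work(),
+ * the 100 mA and 500 mA levels are lowered one step while the
+ * battery voltage is below VBAT_3700.
+ */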
+ switch (min_value) {
+ case 100:
+ if (di->vbat < VBAT_3700)
+ min_value = USB_CH_IP_CUR_LVL_0P05;
+ break;
+ case 500:
+ if (di->vbat < VBAT_3700)
+ min_value = USB_CH_IP_CUR_LVL_0P45;
+ break;
+ default:
+ break;
+ }
+
+ input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
+ if (input_curr_index < 0) {
+ dev_err(di->dev, "VBUS input current limit too high\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_USBCH_IPT_CRNTLVL_REG,
+ input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
+ if (ret)
+ dev_err(di->dev, "%s write failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_led_en() - turn on/off charging led
+ * @di: pointer to the ab8500_charger structure
+ * @on: flag to turn on/off the charging led
+ *
+ * Power ON/OFF charging LED indication
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_led_en(struct ab8500_charger *di, int on)
+{
+ int ret;
+
+ if (on) {
+ /* Power ON charging LED indicator, set LED current to 5mA */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_CTRL,
+ (LED_IND_CUR_5MA | LED_INDICATOR_PWM_ENA));
+ if (ret) {
+ dev_err(di->dev, "Power ON LED failed\n");
+ return ret;
+ }
+ /* LED indicator PWM duty cycle 252/256 */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_DUTY,
+ LED_INDICATOR_PWM_DUTY_252_256);
+ if (ret) {
+ dev_err(di->dev, "Set LED PWM duty cycle failed\n");
+ return ret;
+ }
+ } else {
+ /* Power off charging LED indicator */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_CTRL,
+ LED_INDICATOR_PWM_DIS);
+ if (ret) {
+ dev_err(di->dev, "Power-off LED failed\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_ac_en() - enable or disable ac charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @iset: charging current
+ *
+ * Enable/Disable AC/Mains charging and turn the charging led
+ * on/off accordingly.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_ac_en(struct ux500_charger *charger,
+ int enable, int vset, int iset)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ int input_curr_index;
+ u8 overshoot = 0;
+
+ struct ab8500_charger *di = to_ab8500_charger_ac_device_info(charger);
+
+ if (enable) {
+ /* Check if AC is connected */
+ if (!di->ac.charger_connected) {
+ dev_err(di->dev, "AC charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable AC charging */
+ dev_dbg(di->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+
+ /* Check if the requested voltage or current is valid */
+ volt_index = ab8500_voltage_to_regval(vset);
+ curr_index = ab8500_current_to_regval(iset);
+ input_curr_index = ab8500_current_to_regval(
+ di->bat->chg_params->ac_curr_max);
+ if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: maximum battery charging voltage */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* MainChInputCurr: current that can be drawn from the charger*/
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_MCH_IPT_CURLVL_REG,
+ input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Check if VBAT overshoot control should be enabled */
+ if (!di->bat->enable_overshoot)
+ overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
+
+ /* Enable Main Charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_MCH_CTRL1, MAIN_CH_ENA | overshoot);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Power on charging LED indication */
+ ret = ab8500_charger_led_en(di, true);
+ if (ret < 0)
+ dev_err(di->dev, "failed to enable LED\n");
+
+ di->ac.charger_online = 1;
+ } else {
+ /* Disable AC charging */
+
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the
+ * watchdog logic. That means we have to continuously
+ * kick the charger watchdog even when no charger is
+ * connected. This is only valid once the AC charger
+ * has been enabled. This is a bug that is not handled
+ * by the algorithm and the watchdog has to be kicked
+ * by the charger driver when the AC charger
+ * is disabled
+ */
+ if (di->ac_conn) {
+ queue_delayed_work(di->charger_wq,
+ &di->kick_wd_work,
+ round_jiffies(WD_KICK_INTERVAL));
+ }
+
+ /*
+ * We can't turn off charging completely
+ * due to a bug in AB8500 cut1.
+ * If we do, charging will not start again.
+ * That is why we set the lowest voltage
+ * and current possible
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, CH_VOL_LVL_3P5);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+ break;
+
+ case AB8500_CUT2P0:
+ default:
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_MCH_CTRL1, 0);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+ break;
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0)
+ dev_err(di->dev, "failed to disable LED\n");
+
+ di->ac.charger_online = 0;
+ di->ac.wd_expired = false;
+ dev_dbg(di->dev, "%s Disabled AC charging\n", __func__);
+ }
+ power_supply_changed(&di->ac_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_usb_en() - enable or disable usb charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @ich_out: charger output current
+ *
+ * Enable/Disable USB charging and turn the charging led on/off accordingly.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_en(struct ux500_charger *charger,
+ int enable, int vset, int ich_out)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ u8 overshoot = 0;
+
+ struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
+
+ if (enable) {
+ /* Check if USB is connected */
+ if (!di->usb.charger_connected) {
+ dev_err(di->dev, "USB charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable USB charging */
+ dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+ /* Check if the requested voltage or current is valid */
+ volt_index = ab8500_voltage_to_regval(vset);
+ curr_index = ab8500_current_to_regval(ich_out);
+ if (volt_index < 0 || curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "setting USBChInputCurr failed\n");
+ return ret;
+ }
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* Check if VBAT overshoot control should be enabled */
+ if (!di->bat->enable_overshoot)
+ overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
+
+ /* Enable USB Charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* If successful, power on charging LED indication */
+ ret = ab8500_charger_led_en(di, true);
+ if (ret < 0)
+ dev_err(di->dev, "failed to enable LED\n");
+
+ queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
+
+ di->usb.charger_online = 1;
+ } else {
+ /* Disable USB charging */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL1_REG, 0);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0)
+ dev_err(di->dev, "failed to disable LED\n");
+
+ di->usb.charger_online = 0;
+ di->usb.wd_expired = false;
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+
+ /* Cancel any pending Vbat check work */
+ if (delayed_work_pending(&di->check_vbat_work))
+ cancel_delayed_work(&di->check_vbat_work);
+
+ }
+ power_supply_changed(&di->usb_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_watchdog_kick() - kick charger watchdog
+ * @charger: pointer to the ux500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct ab8500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ di = to_ab8500_charger_ac_device_info(charger);
+ else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab8500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_update_charger_current() - update charger current
+ * @charger: pointer to the ux500_charger structure
+ * @ich_out: charger output current
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret;
+ int curr_index;
+ struct ab8500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ di = to_ab8500_charger_ac_device_info(charger);
+ else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab8500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = ab8500_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(di->dev,
+ "Charger current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Reset the main and usb drop input current measurement counter */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARGER_CTRL,
+ 0x1);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
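+/**
+ * ab8500_charger_get_ext_psy_data() - collect data from external supplies
+ * @dev: device of the external power supply
+ * @data: pointer to the ux500_charger part of the USB charger
+ *
+ * Called for each registered power supply; stores the battery voltage
+ * reported by any battery supply that lists this charger in supplied_to.
+ */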
+static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_charger *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+ struct ux500_charger *usb_chg;
+
+ usb_chg = (struct ux500_charger *)data;
+ psy = &usb_chg->psy;
+
+ di = to_ab8500_charger_usb_device_info(usb_chg);
+
+ ext = dev_get_drvdata(dev);
+
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->vbat = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_check_vbat_work() - keep vbus current within spec
+ * @work: pointer to the work_struct structure
+ *
+ * Due to an ASIC bug it is necessary to lower the input current to the vbus
+ * charger when charging at some specific current levels. This issue is only
+ * valid below a certain battery voltage. This function makes sure that the
+ * allowed current limit isn't exceeded.
+ */
+static void ab8500_charger_check_vbat_work(struct work_struct *work)
+{
+ int t = 10;
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_vbat_work.work);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->usb_chg.psy, ab8500_charger_get_ext_psy_data);
+
+ /* First run old_vbat is 0. */
+ if (di->old_vbat == 0)
+ di->old_vbat = di->vbat;
+
+ if (!((di->old_vbat <= VBAT_3700 && di->vbat <= VBAT_3700) ||
+ (di->old_vbat > VBAT_3700 && di->vbat > VBAT_3700))) {
+ dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
+ " old: %d\n", di->max_usb_in_curr, di->vbat,
+ di->old_vbat);
+ ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ di->old_vbat = di->vbat;
+
+ /*
+ * No need to check the battery voltage every second when not close to
+ * the threshold.
+ */
+ if (di->vbat < (VBAT_3700 + 100) &&
+ (di->vbat > (VBAT_3700 - 100)))
+ t = 1;
+
+ queue_delayed_work(di->charger_wq, &di->check_vbat_work, t * HZ);
+}
+
+/**
+ * ab8500_charger_check_hw_failure_work() - check main charger failure
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_hw_failure_work.work);
+
+ /* Check if the status bits for HW failure is still active */
+ if (di->flags.mainextchnotok) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & MAIN_CH_NOK)) {
+ di->flags.mainextchnotok = false;
+ power_supply_changed(&di->ac_chg.psy);
+ }
+ }
+ if (di->flags.vbus_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & VBUS_OVV_TH)) {
+ di->flags.vbus_ovv = false;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+ /* If we still have a failure, schedule a new check */
+ if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab8500_charger_kick_watchdog_work() - kick the watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog.
+ *
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+static void ab8500_charger_kick_watchdog_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, kick_wd_work.work);
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ /* Schedule a new watchdog kick */
+ queue_delayed_work(di->charger_wq,
+ &di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL));
+}
+
+/**
+ * ab8500_charger_ac_work() - work to get and set main charger status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_ac_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, ac_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if the main charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (ret & AC_PW_CONN) {
+ di->ac.charger_connected = 1;
+ di->ac_conn = true;
+ } else {
+ di->ac.charger_connected = 0;
+ }
+
+ power_supply_changed(&di->ac_chg.psy);
+}
+
+/**
+ * ab8500_charger_detect_usb_type_work() - work to detect USB type
+ * @work: Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, detect_usb_type_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ ret = ab8500_charger_detect_usb_type(di);
+ if (!ret) {
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ break;
+
+ case AB8500_CUT2P0:
+ default:
+ /* For ABB cut2.0 and onwards we have an IRQ,
+ * USB_LINK_STATUS that will be triggered when the USB
+ * link status changes. The exception is USB connected
+ * during startup. Then we don't get a
+ * USB_LINK_STATUS IRQ
+ */
+ if (di->vbus_detected_start) {
+ di->vbus_detected_start = false;
+ ret = ab8500_charger_detect_usb_type(di);
+ if (!ret) {
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+ break;
+ }
+ }
+}
+
+/**
+ * ab8500_charger_usb_link_status_work() - work to detect USB type
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_usb_link_status_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, usb_link_status_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ ret = ab8500_charger_read_usb_type(di);
+ if (!ret) {
+ /* Update maximum input current */
+ ret = ab8500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ } else if (ret == -ENXIO) {
+ /* No valid charger type detected */
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+}
+
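+/**
+ * ab8500_charger_usb_state_changed_work() - act on USB state updates
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function that enables or disables USB charging based on the
+ * USB state reported by the USB stack via the notifier callback.
+ */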
+static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, usb_state_changed_work);
+
+ if (!di->vbus_detected)
+ return;
+
+ spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+ di->usb_state.usb_changed = false;
+ spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+ /*
+ * Wait for some time until updates are received from the USB
+ * stack and negotiations are completed
+ */
+ msleep(250);
+
+ if (di->usb_state.usb_changed)
+ return;
+
+ dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current);
+
+ switch (di->usb_state.state) {
+ case AB8500_BM_USB_STATE_RESET_HS:
+ case AB8500_BM_USB_STATE_RESET_FS:
+ case AB8500_BM_USB_STATE_SUSPEND:
+ case AB8500_BM_USB_STATE_MAX:
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ break;
+
+ case AB8500_BM_USB_STATE_RESUME:
+ /*
+ * When going from suspend to resume there should be a
+ * delay of 1 second before enabling charging
+ */
+ msleep(1000);
+ /* Intentional fall through */
+ case AB8500_BM_USB_STATE_CONFIGURED:
+ /*
+ * USB is configured, enable charging with the charging
+ * input current obtained from USB driver
+ */
+ if (!ab8500_charger_get_usb_cur(di)) {
+ /* Update maximum input current */
+ ret = ab8500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab8500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+ bool prev_status;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_usbchgnotok_work.work);
+
+ /* Check if the status bit for usbchargernotok is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ prev_status = di->flags.usbchargernotok;
+
+ if (reg_value & VBUS_CH_NOK) {
+ di->flags.usbchargernotok = true;
+ /* Check again in 1sec */
+ queue_delayed_work(di->charger_wq,
+ &di->check_usbchgnotok_work, HZ);
+ } else {
+ di->flags.usbchargernotok = false;
+ di->flags.vbus_collapse = false;
+ }
+
+ if (prev_status != di->flags.usbchargernotok)
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_main_thermal_prot_work() - check main thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the Main thermal prot status
+ */
+static void ab8500_charger_check_main_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_main_thermal_prot_work);
+
+ /* Check if the status bit for main_thermal_prot is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & MAIN_CH_TH_PROT)
+ di->flags.main_thermal_prot = true;
+ else
+ di->flags.main_thermal_prot = false;
+
+ power_supply_changed(&di->ac_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab8500_charger_check_usb_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_usb_thermal_prot_work);
+
+ /* Check if the status bit for usb_thermal_prot is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & USB_CH_TH_PROT)
+ di->flags.usb_thermal_prot = true;
+ else
+ di->flags.usb_thermal_prot = false;
+
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_mainchunplugdet_handler() - main charger unplugged
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger unplugged\n");
+ queue_work(di->charger_wq, &di->ac_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchplugdet_handler() - main charger plugged
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger plugged\n");
+ queue_work(di->charger_wq, &di->ac_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainextchnotok_handler() - main charger not ok
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainextchnotok_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger not ok\n");
+ di->flags.mainextchnotok = true;
+ power_supply_changed(&di->ac_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotr_handler() - Die temp is above main charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above Main charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotf_handler() - Die temp is below main charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp ok for Main charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS falling detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ di->vbus_detected = true;
+ dev_dbg(di->dev, "VBUS rising detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "USB link status changed\n");
+
+ queue_work(di->charger_wq, &di->usb_link_status_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotf_handler() - Die temp is below usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp ok for USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Not allowed USB charger detected\n");
+ queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Charger watchdog expired\n");
+
+ /*
+ * The charger that was online when the watchdog expired
+ * needs to be restarted for charging to start again
+ */
+ if (di->ac.charger_online) {
+ di->ac.wd_expired = true;
+ power_supply_changed(&di->ac_chg.psy);
+ }
+ if (di->usb.charger_online) {
+ di->usb.wd_expired = true;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusovv_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS overvoltage detected\n");
+ di->flags.vbus_ovv = true;
+ power_supply_changed(&di->usb_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_ac_get_property() - get the ac/mains properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the ac/mains
+ * properties by reading the sysfs files.
+ * AC/Mains properties are online, present and voltage.
+ * online: ac/mains charging is in progress or not
+ * present: presence of the ac/mains
+ * voltage: AC/Mains voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_charger *di;
+
+ di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.mainextchnotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->ac.wd_expired || di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.main_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->ac.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->ac.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
+ val->intval = di->ac.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate when CV mode is entered
+ * for the AC charger
+ */
+ di->ac.cv_active = ab8500_charger_ac_cv(di);
+ val->intval = di->ac.cv_active;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab8500_charger_get_ac_current(di) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_usb_get_property() - get the usb properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online: usb charging is in progress or not
+ * present: presence of the usb
+ * voltage: vbus voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_charger *di;
+
+ di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.usbchargernotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->ac.wd_expired || di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.usb_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (di->flags.vbus_ovv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->usb.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
+ val->intval = di->usb.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate when CV mode is entered
+ * for the USB charger
+ */
+ di->usb.cv_active = ab8500_charger_usb_cv(di);
+ val->intval = di->usb.cv_active;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab8500_charger_get_usb_current(di) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ /*
+ * This property is used to indicate when VBUS has collapsed
+ * due to too high output current from the USB charger
+ */
+ if (di->flags.vbus_collapse)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_init_hw_registers() - Set up charger related registers
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Set up charger OVV, watchdog and maximum voltage registers as well as
+ * charging of the backup battery
+ */
+static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
+{
+ int ret = 0;
+
+ /* Setup maximum charger current and voltage for ABB cut2.0 */
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ break;
+ case AB8500_CUT2P0:
+ default:
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_MAX_REG, CH_VOL_LVL_4P6);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to set CH_VOLT_LVL_MAX_REG\n");
+ goto out;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_MAX_REG, CH_OP_CUR_LVL_1P6);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to set CH_OPT_CRNTLVL_MAX_REG\n");
+ goto out;
+ }
+
+ break;
+ }
+
+ /* VBUS OVV set to 6.3V and enable automatic current limitation */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL2_REG,
+ VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to set VBUS OVV\n");
+ goto out;
+ }
+
+ /* Enable main watchdog in OTP */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_OTP_EMUL, AB8500_OTP_CONF_15, OTP_ENABLE_WD);
+ if (ret) {
+ dev_err(di->dev, "failed to enable main WD in OTP\n");
+ goto out;
+ }
+
+ /* Enable main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_ENA);
+ if (ret) {
+ dev_err(di->dev, "faile to enable main watchdog\n");
+ goto out;
+ }
+
+ /*
+ * Due to internal synchronisation, Enable and Kick watchdog bits
+ * cannot be enabled in a single write.
+ * A minimum delay of two 32 kHz clock periods (62.5 µs) must be
+ * inserted between writing the Enable and the Kick bits.
+ */
+ udelay(63);
+
+ /* Kick main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG,
+ (MAIN_WDOG_ENA | MAIN_WDOG_KICK));
+ if (ret) {
+ dev_err(di->dev, "failed to kick main watchdog\n");
+ goto out;
+ }
+
+ /* Disable main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_DIS);
+ if (ret) {
+ dev_err(di->dev, "failed to disable main watchdog\n");
+ goto out;
+ }
+
+ /* Set watchdog timeout */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_WD_TIMER_REG, WD_TIMER);
+ if (ret) {
+ dev_err(di->dev, "failed to set charger watchdog timeout\n");
+ goto out;
+ }
+
+ /* Backup battery voltage and current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_RTC,
+ AB8500_RTC_BACKUP_CHG_REG,
+ di->bat->bkup_bat_v |
+ di->bat->bkup_bat_i);
+ if (ret) {
+ dev_err(di->dev, "failed to setup backup battery charging\n");
+ goto out;
+ }
+
+ /* Enable backup battery charging */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_RTC, AB8500_RTC_CTRL_REG,
+ RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
+ if (ret < 0)
+ dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+out:
+ return ret;
+}
+
+/*
+ * ab8500 charger driver interrupts and their respective isr
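+ * (each interrupt is looked up by name via platform_get_irq_byname())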
+ */
+static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
+ {"MAIN_CH_UNPLUG_DET", ab8500_charger_mainchunplugdet_handler},
+ {"MAIN_CHARGE_PLUG_DET", ab8500_charger_mainchplugdet_handler},
+ {"MAIN_EXT_CH_NOT_OK", ab8500_charger_mainextchnotok_handler},
+ {"MAIN_CH_TH_PROT_R", ab8500_charger_mainchthprotr_handler},
+ {"MAIN_CH_TH_PROT_F", ab8500_charger_mainchthprotf_handler},
+ {"VBUS_DET_F", ab8500_charger_vbusdetf_handler},
+ {"VBUS_DET_R", ab8500_charger_vbusdetr_handler},
+ {"USB_LINK_STATUS", ab8500_charger_usblinkstatus_handler},
+ {"USB_CH_TH_PROT_R", ab8500_charger_usbchthprotr_handler},
+ {"USB_CH_TH_PROT_F", ab8500_charger_usbchthprotf_handler},
+ {"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
+ {"VBUS_OVV", ab8500_charger_vbusovv_handler},
+ {"CH_WD_EXP", ab8500_charger_chwdexp_handler},
+};
+
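+/**
+ * ab8500_charger_usb_notifier_call() - USB transceiver notifier callback
+ * @nb: pointer to the notifier_block registered with the OTG transceiver
+ * @event: notifier event (unused)
+ * @power: pointer to the current (in mA) granted by the USB stack
+ *
+ * Translates the granted current into an AB8500 BM USB state and schedules
+ * usb_state_changed_work to act on it.
+ */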
+static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *power)
+{
+ struct ab8500_charger *di =
+ container_of(nb, struct ab8500_charger, nb);
+ enum ab8500_usb_state bm_usb_state;
+ unsigned mA = *((unsigned *)power);
+
+ /*
+ * TODO: The state is fabricated here. See if the charger really
+ * needs the USB state or if mA is enough
+ */
+ if ((di->usb_state.usb_current == 2) && (mA > 2))
+ bm_usb_state = AB8500_BM_USB_STATE_RESUME;
+ else if (mA == 0)
+ bm_usb_state = AB8500_BM_USB_STATE_RESET_HS;
+ else if (mA == 2)
+ bm_usb_state = AB8500_BM_USB_STATE_SUSPEND;
+ else if (mA >= 8) /* 8, 100, 500 */
+ bm_usb_state = AB8500_BM_USB_STATE_CONFIGURED;
+ else /* Should never occur */
+ bm_usb_state = AB8500_BM_USB_STATE_RESET_FS;
+
+ dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+ __func__, bm_usb_state, mA);
+
+ spin_lock(&di->usb_state.usb_lock);
+ di->usb_state.usb_changed = true;
+ spin_unlock(&di->usb_state.usb_lock);
+
+ di->usb_state.state = bm_usb_state;
+ di->usb_state.usb_current = mA;
+
+ queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab8500_charger_resume(struct platform_device *pdev)
+{
+ int ret;
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+ if (di->ac_conn && (di->chip_id == AB8500_CUT1P0 ||
+ di->chip_id == AB8500_CUT1P1)) {
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ /* If not already pending start a new timer */
+ if (!delayed_work_pending(&di->kick_wd_work)) {
+ queue_delayed_work(di->charger_wq, &di->kick_wd_work,
+ round_jiffies(WD_KICK_INTERVAL));
+ }
+ }
+
+ /* If we still have a HW failure, schedule a new check */
+ if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, 0);
+ }
+
+ return 0;
+}
+
+static int ab8500_charger_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ /* Cancel any pending HW failure check */
+ if (delayed_work_pending(&di->check_hw_failure_work))
+ cancel_delayed_work(&di->check_hw_failure_work);
+
+ return 0;
+}
+#else
+#define ab8500_charger_suspend NULL
+#define ab8500_charger_resume NULL
+#endif
+
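+/**
+ * ab8500_charger_remove() - tear down the ab8500 charger driver
+ * @pdev: pointer to the platform_device structure
+ *
+ * Disables AC and USB charging, frees the interrupts, releases the vddadc
+ * regulator, disables backup battery charging and unregisters the power
+ * supplies.
+ */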
+static int __devexit ab8500_charger_remove(struct platform_device *pdev)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+ int i, irq, ret;
+
+ /* Disable AC charging */
+ ab8500_charger_ac_en(&di->ac_chg, false, 0, 0);
+
+ /* Disable USB charging */
+ ab8500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Release the vddadc regulator */
+ regulator_put(di->regu);
+
+ /* Disable backup battery charging */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
+ if (ret < 0)
+ dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+ otg_unregister_notifier(di->otg, &di->nb);
+ otg_put_transceiver(di->otg);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->charger_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->usb_chg.psy);
+ power_supply_unregister(&di->ac_chg.psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
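+/**
+ * ab8500_charger_probe() - set up the ab8500 charger driver
+ * @pdev: pointer to the platform_device structure
+ *
+ * Registers the AC and USB charger power supplies, creates the charger
+ * work queue and work items, and initializes the charger hardware
+ * registers.
+ */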
+static int __devinit ab8500_charger_probe(struct platform_device *pdev)
+{
+ int irq, i, charger_status, ret = 0;
+ struct ab8500_platform_data *plat;
+
+ struct ab8500_charger *di =
+ kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get();
+
+ /* initialize lock */
+ spin_lock_init(&di->usb_state.usb_lock);
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get charger specific platform data */
+ if (!plat->charger) {
+ dev_err(di->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->charger;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ /* AC supply */
+ /* power_supply base class */
+ di->ac_chg.psy.name = "ab8500_ac";
+ di->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
+ di->ac_chg.psy.properties = ab8500_charger_ac_props;
+ di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
+ di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
+ di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->ac_chg.ops.enable = &ab8500_charger_ac_en;
+ di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+ di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+ di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
+ ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+ di->ac_chg.max_out_curr = ab8500_charger_current_map[
+ ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+ /* USB supply */
+ /* power_supply base class */
+ di->usb_chg.psy.name = "ab8500_usb";
+ di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+ di->usb_chg.psy.properties = ab8500_charger_usb_props;
+ di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
+ di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
+ di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->usb_chg.ops.enable = &ab8500_charger_usb_en;
+ di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+ di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+ di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
+ ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+ di->usb_chg.max_out_curr = ab8500_charger_current_map[
+ ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+
+ /* Create a work queue for the charger */
+ di->charger_wq =
+ create_singlethread_workqueue("ab8500_charger_wq");
+ if (di->charger_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ goto free_device_info;
+ }
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ ab8500_charger_check_hw_failure_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ ab8500_charger_check_usbchargernotok_work);
+
+	/*
+	 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+	 * logic, which means we have to continuously kick the charger
+	 * watchdog even when no charger is connected. This only
+	 * applies once the AC charger has been enabled. The bug is not
+	 * handled by the charging algorithm, so the watchdog has to be
+	 * kicked by the charger driver when the AC charger is disabled.
+	 */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+ ab8500_charger_kick_watchdog_work);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+ ab8500_charger_check_vbat_work);
+
+ /* Init work for charger detection */
+ INIT_WORK(&di->usb_link_status_work,
+ ab8500_charger_usb_link_status_work);
+ INIT_WORK(&di->ac_work, ab8500_charger_ac_work);
+ INIT_WORK(&di->detect_usb_type_work,
+ ab8500_charger_detect_usb_type_work);
+
+ INIT_WORK(&di->usb_state_changed_work,
+ ab8500_charger_usb_state_changed_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&di->check_main_thermal_prot_work,
+ ab8500_charger_check_main_thermal_prot_work);
+ INIT_WORK(&di->check_usb_thermal_prot_work,
+ ab8500_charger_check_usb_thermal_prot_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_charger_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB8500 CID is: 0x%02x\n", di->chip_id);
+
+ /*
+ * VDD ADC supply needs to be enabled from this driver when there
+ * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+ * interrupts during charging
+ */
+ di->regu = regulator_get(di->dev, "vddadc");
+ if (IS_ERR(di->regu)) {
+ ret = PTR_ERR(di->regu);
+ dev_err(di->dev, "failed to get vddadc regulator\n");
+ goto free_charger_wq;
+ }
+
+
+ /* Initialize OVV, and other registers */
+ ret = ab8500_charger_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize ABB registers\n");
+ goto free_regulator;
+ }
+
+ /* Register AC charger class */
+ ret = power_supply_register(di->dev, &di->ac_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register AC charger\n");
+ goto free_regulator;
+ }
+
+ /* Register USB charger class */
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+ goto free_ac;
+ }
+
+ di->otg = otg_get_transceiver();
+ if (!di->otg) {
+ dev_err(di->dev, "failed to get otg transceiver\n");
+ ret = -EINVAL;
+ goto free_usb;
+ }
+ di->nb.notifier_call = ab8500_charger_usb_notifier_call;
+ ret = otg_register_notifier(di->otg, &di->nb);
+ if (ret) {
+ dev_err(di->dev, "failed to register otg notifier\n");
+ goto put_otg_transceiver;
+ }
+
+ /* Identify the connected charger types during startup */
+ charger_status = ab8500_charger_detect_chargers(di);
+ if (charger_status & AC_PW_CONN) {
+ di->ac.charger_connected = 1;
+ di->ac_conn = true;
+ power_supply_changed(&di->ac_chg.psy);
+ }
+
+ if (charger_status & USB_PW_CONN) {
+ dev_dbg(di->dev, "VBUS Detect during startup\n");
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->detect_usb_type_work);
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+			dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
+				ab8500_charger_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_charger_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ return ret;
+
+free_irq:
+ otg_unregister_notifier(di->otg, &di->nb);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+put_otg_transceiver:
+ otg_put_transceiver(di->otg);
+free_usb:
+ power_supply_unregister(&di->usb_chg.psy);
+free_ac:
+ power_supply_unregister(&di->ac_chg.psy);
+free_regulator:
+ regulator_put(di->regu);
+free_charger_wq:
+ destroy_workqueue(di->charger_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_charger_driver = {
+ .probe = ab8500_charger_probe,
+ .remove = __devexit_p(ab8500_charger_remove),
+ .suspend = ab8500_charger_suspend,
+ .resume = ab8500_charger_resume,
+ .driver = {
+ .name = "ab8500-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_charger_init(void)
+{
+ return platform_driver_register(&ab8500_charger_driver);
+}
+
+static void __exit ab8500_charger_exit(void)
+{
+ platform_driver_unregister(&ab8500_charger_driver);
+}
+
+subsys_initcall_sync(ab8500_charger_init);
+module_exit(ab8500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-charger");
+MODULE_DESCRIPTION("AB8500 charger management driver");
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
new file mode 100644
index 00000000000..e192893764a
--- /dev/null
+++ b/drivers/power/ab8500_fg.c
@@ -0,0 +1,2316 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required for Li-Ion backup batteries but
+ * not for capacitive ones. HREF boards use a capacitive backup battery, so
+ * backup battery management is not used there, although the supporting code
+ * is available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/kobject.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/slab.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/delay.h>
+#include <linux/mfd/ab8500/gpadc.h>
+#include <linux/mfd/abx500.h>
+#include <linux/time.h>
+
+#define MILLI_TO_MICRO 1000
+#define FG_LSB_IN_MA 1627
+#define QLSB_NANO_AMP_HOURS_X10 1129
+
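+/*
+ * The coulomb counter delivers four samples per second (one conversion
+ * every 250 ms), so a time in seconds corresponds to seconds * 4 samples.
+ */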
+#define SEC_TO_SAMPLE(S) (S * 4)
+
+#define NBR_AVG_SAMPLES 20
+
+#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+
+#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
+
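+/*
+ * Linear interpolation between the points (x1, y1) and (x2, y2),
+ * evaluated at x. For example, interpolate(5, 0, 0, 10, 100) gives 50.
+ */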
+#define interpolate(x, x1, y1, x2, y2) \
+	((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
+
+#define to_ab8500_fg_device_info(x) container_of((x), \
+	struct ab8500_fg, fg_psy)
+
+/**
+ * struct ab8500_fg_interrupts - ab8500 fg interrupts
+ * @name:	name of the interrupt
+ * @isr:	function pointer to the isr
+ */
+struct ab8500_fg_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab8500_fg_discharge_state {
+ AB8500_FG_DISCHARGE_INIT,
+ AB8500_FG_DISCHARGE_INITMEASURING,
+ AB8500_FG_DISCHARGE_INIT_RECOVERY,
+ AB8500_FG_DISCHARGE_RECOVERY,
+ AB8500_FG_DISCHARGE_READOUT,
+ AB8500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+ "DISCHARGE_INIT",
+ "DISCHARGE_INITMEASURING",
+ "DISCHARGE_INIT_RECOVERY",
+ "DISCHARGE_RECOVERY",
+ "DISCHARGE_READOUT",
+ "DISCHARGE_WAKEUP",
+};
+
+enum ab8500_fg_charge_state {
+ AB8500_FG_CHARGE_INIT,
+ AB8500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+ "CHARGE_INIT",
+ "CHARGE_READOUT",
+};
+
+enum ab8500_fg_calibration_state {
+ AB8500_FG_CALIB_INIT,
+ AB8500_FG_CALIB_WAIT,
+ AB8500_FG_CALIB_END,
+};
+
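+/* Moving average filter over the last NBR_AVG_SAMPLES capacity samples */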
+struct ab8500_fg_avg_cap {
+ int avg;
+ int samples[NBR_AVG_SAMPLES];
+ __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ int pos;
+ int nbr_samples;
+ int sum;
+};
+
+struct ab8500_fg_battery_capacity {
+ int max_mah_design;
+ int max_mah;
+ int mah;
+ int permille;
+ int level;
+ int prev_mah;
+ int prev_percent;
+ int prev_level;
+};
+
+struct ab8500_fg_flags {
+ bool fg_enabled;
+ bool conv_done;
+ bool charging;
+ bool fully_charged;
+ bool low_bat_delay;
+ bool low_bat;
+ bool bat_ovv;
+ bool batt_unknown;
+ bool calibrate;
+};
+
+struct inst_curr_result_list {
+ struct list_head list;
+ int *result;
+};
+
+/**
+ * struct ab8500_fg - ab8500 FG device information
+ * @dev: Pointer to the structure device
+ * @node: a list of AB8500 FGs, hence prepared for reentrance
+ * @vbat: Battery voltage in mV
+ * @vbat_nom: Nominal battery voltage in mV
+ * @inst_curr:		Instantaneous battery current in mA
+ * @avg_curr: Average battery current in mA
+ * @fg_samples: Number of samples used in the FG accumulation
+ * @accu_charge: Accumulated charge from the last conversion
+ * @recovery_cnt: Counter for recovery mode
+ * @high_curr_cnt: Counter for high current mode
+ * @init_cnt: Counter for init mode
+ * @recovery_needed: Indicate if recovery is needed
+ * @high_curr_mode: Indicate if we're in high current mode
+ * @init_capacity: Indicate if initial capacity measuring should be done
+ * @inst_curr_mip: Indicate if 'instant' current measurement is in progress
+ * AB8500 does not support real instant current
+ * readings. The best we can do is sample over
+ * 250ms.
+ * @inst_curr_lock: Control access to inst curr wait queues and result lists
+ * @inst_curr_wq: Work queue for running 'instant' current measurements
+ * @fg_inst_curr_work: Work to measure 'instant' current
+ * @result_wq: Wait queue for blocking 'instant' current clients
+ * @cpw_a_wq: Physical wait queue A for concurrent inst curr clients
+ * @cpw_b_wq: Physical wait queue B for concurrent inst curr clients
+ * @cpw_next_wq: Logical next wait queue for concurrent inst curr clients
+ * @cpw_this_wq: Logical this wait queue for concurrent inst curr clients
+ * @inst_curr_result: Result register for blocking instant current clients
+ * @result_a_list: Physical result list A for concurrent inst curr clients
+ * @result_b_list: Physical result list B for concurrent inst curr clients
+ * @next_result_list: Logical next results for concurrent inst curr clients
+ * @this_result_list: Logical this results for concurrent inst curr clients
+ * @calib_state:	State during offset calibration
+ * @discharge_state: Current discharge state
+ * @charge_state: Current charge state
+ * @flags: Structure for information about events triggered
+ * @bat_cap: Structure for battery capacity specific parameters
+ * @avg_cap: Average capacity filter
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab8500_fg platform data
+ * @bat: Pointer to the ab8500_bm platform data
+ * @fg_psy: Structure that holds the FG specific battery properties
+ * @fg_wq: Work queue for running the FG algorithm
+ * @fg_periodic_work: Work to run the FG algorithm periodically
+ * @fg_low_bat_work: Work to check low bat condition
+ * @fg_reinit_work:	Work used to reset and reinitialise the FG algorithm
+ * @fg_work: Work to run the FG algorithm instantly
+ * @fg_acc_cur_work: Work to read the FG accumulator
+ * @cc_lock: Mutex for locking the CC
+ * @fg_kobject: Structure of type kobject
+ */
+struct ab8500_fg {
+ struct device *dev;
+ struct list_head node;
+ int vbat;
+ int vbat_nom;
+ int inst_curr;
+ int avg_curr;
+ int fg_samples;
+ int accu_charge;
+ int recovery_cnt;
+ int high_curr_cnt;
+ int init_cnt;
+ bool recovery_needed;
+ bool high_curr_mode;
+ bool init_capacity;
+ bool inst_curr_mip;
+ spinlock_t inst_curr_lock;
+ struct workqueue_struct *inst_curr_wq;
+ struct delayed_work fg_inst_curr_work;
+ wait_queue_head_t result_wq;
+ wait_queue_head_t cpw_a_wq;
+ wait_queue_head_t cpw_b_wq;
+ wait_queue_head_t *cpw_next_wq;
+ wait_queue_head_t *cpw_this_wq;
+ int inst_curr_result;
+ struct list_head result_a_list;
+ struct list_head result_b_list;
+ struct list_head *next_result_list;
+ struct list_head *this_result_list;
+ enum ab8500_fg_calibration_state calib_state;
+ enum ab8500_fg_discharge_state discharge_state;
+ enum ab8500_fg_charge_state charge_state;
+ struct ab8500_fg_flags flags;
+ struct ab8500_fg_battery_capacity bat_cap;
+ struct ab8500_fg_avg_cap avg_cap;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_fg_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct power_supply fg_psy;
+ struct workqueue_struct *fg_wq;
+ struct delayed_work fg_periodic_work;
+ struct delayed_work fg_low_bat_work;
+ struct delayed_work fg_reinit_work;
+ struct work_struct fg_work;
+ struct work_struct fg_acc_cur_work;
+ struct mutex cc_lock;
+ struct kobject fg_kobject;
+};
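+
+/* List of registered fuel gauges; the first entry is treated as the primary one */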
+static LIST_HEAD(ab8500_fg_list);
+
+/**
+ * ab8500_fg_get() - returns a reference to the primary AB8500 fuel gauge
+ * (i.e. the first fuel gauge in the instance list)
+ */
+struct ab8500_fg *ab8500_fg_get(void)
+{
+ struct ab8500_fg *fg;
+
+ if (list_empty(&ab8500_fg_list))
+ return NULL;
+
+ fg = list_first_entry(&ab8500_fg_list, struct ab8500_fg, node);
+ return fg;
+}
+
+/* Main battery properties */
+static enum power_supply_property ab8500_fg_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+/*
+ * This array maps the raw hex value to lowbat voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_fg_lowbat_voltage_map[] = {
+ 2300 ,
+ 2325 ,
+ 2350 ,
+ 2375 ,
+ 2400 ,
+ 2425 ,
+ 2450 ,
+ 2475 ,
+ 2500 ,
+ 2525 ,
+ 2550 ,
+ 2575 ,
+ 2600 ,
+ 2625 ,
+ 2650 ,
+ 2675 ,
+ 2700 ,
+ 2725 ,
+ 2750 ,
+ 2775 ,
+ 2800 ,
+ 2825 ,
+ 2850 ,
+ 2875 ,
+ 2900 ,
+ 2925 ,
+ 2950 ,
+ 2975 ,
+ 3000 ,
+ 3025 ,
+ 3050 ,
+ 3075 ,
+ 3100 ,
+ 3125 ,
+ 3150 ,
+ 3175 ,
+ 3200 ,
+ 3225 ,
+ 3250 ,
+ 3275 ,
+ 3300 ,
+ 3325 ,
+ 3350 ,
+ 3375 ,
+ 3400 ,
+ 3425 ,
+ 3450 ,
+ 3475 ,
+ 3500 ,
+ 3525 ,
+ 3550 ,
+ 3575 ,
+ 3600 ,
+ 3625 ,
+ 3650 ,
+ 3675 ,
+ 3700 ,
+ 3725 ,
+ 3750 ,
+ 3775 ,
+ 3800 ,
+ 3825 ,
+ 3850 ,
+ 3850 ,
+};
+
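+/*
+ * ab8500_volt_to_regval() - map a low battery threshold voltage (in mV) to
+ * the corresponding register value, i.e. the index in the table above.
+ */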
+static u8 ab8500_volt_to_regval(int voltage)
+{
+ int i;
+
+ if (voltage < ab8500_fg_lowbat_voltage_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_lowbat_voltage_map); i++) {
+ if (voltage < ab8500_fg_lowbat_voltage_map[i])
+ return (u8) i - 1;
+ }
+
+ /* If not captured above, return index of last element */
+ return (u8) ARRAY_SIZE(ab8500_fg_lowbat_voltage_map) - 1;
+}
+
+/**
+ * ab8500_fg_is_low_curr() - Low or high current mode
+ * @di: pointer to the ab8500_fg structure
+ * @curr:	the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
+{
+	/*
+	 * The current is negative when discharging, so we are in low
+	 * current mode when the discharge current is below the
+	 * high current threshold.
+	 */
+ if (curr > -di->bat->fg_params->high_curr_threshold)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ab8500_fg_add_cap_sample() - Add capacity to average filter
+ * @di: pointer to the ab8500_fg structure
+ * @sample: the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
+{
+ struct timespec ts;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ do {
+ avg->sum += sample - avg->samples[avg->pos];
+ avg->samples[avg->pos] = sample;
+ avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->pos++;
+
+ if (avg->pos == NBR_AVG_SAMPLES)
+ avg->pos = 0;
+
+ if (avg->nbr_samples < NBR_AVG_SAMPLES)
+ avg->nbr_samples++;
+
+ /*
+ * Check the time stamp for each sample. If too old,
+ * replace with latest sample
+ */
+ } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+ avg->avg = avg->sum / avg->nbr_samples;
+
+ return avg->avg;
+}
+
+/**
+ * ab8500_fg_clear_cap_samples() - Clear average filter
+ * @di: pointer to the ab8500_fg structure
+ *
+ * The capacity filter is reset to zero.
+ */
+static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
+{
+ int i;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ avg->pos = 0;
+ avg->nbr_samples = 0;
+ avg->sum = 0;
+ avg->avg = 0;
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = 0;
+ avg->time_stamps[i] = 0;
+ }
+}
+
+/**
+ * ab8500_fg_fill_cap_sample() - Fill average filter
+ * @di: pointer to the ab8500_fg structure
+ * @sample: the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
+{
+ int i;
+ struct timespec ts;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = sample;
+ avg->time_stamps[i] = ts.tv_sec;
+ }
+
+ avg->pos = 0;
+ avg->nbr_samples = NBR_AVG_SAMPLES;
+ avg->sum = sample * NBR_AVG_SAMPLES;
+ avg->avg = sample;
+}
+
+/**
+ * ab8500_fg_coulomb_counter() - enable coulomb counter
+ * @di: pointer to the ab8500_fg structure
+ * @enable: enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
+{
+ int ret = 0;
+ mutex_lock(&di->cc_lock);
+ if (enable) {
+ /* To be able to reprogram the number of samples, we have to
+ * first stop the CC and then enable it again */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0x00);
+ if (ret)
+ goto cc_err;
+
+ /* Program the samples */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+ di->fg_samples);
+ if (ret)
+ goto cc_err;
+
+ /* Start the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG,
+ (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = true;
+ } else {
+ /* Clear any pending read requests */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ if (ret)
+ goto cc_err;
+
+ /* Stop the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0);
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = false;
+
+ }
+ dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+ enable, di->fg_samples);
+
+ mutex_unlock(&di->cc_lock);
+
+ return ret;
+cc_err:
+ dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_nonblocking() - battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ * @local_result: pointer to result location, updated after measurement is
+ * completed ~250ms after this function returns
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+int ab8500_fg_inst_curr_nonblocking(struct ab8500_fg *di, int *local_result)
+{
+ DEFINE_WAIT(wait);
+
+ /*
+ * caller needs to do some other work at
+ * the same time as current measurement
+ */
+ wait_queue_head_t *cpw_my_wq;
+ struct inst_curr_result_list *new =
+ kmalloc(sizeof(struct inst_curr_result_list), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->result = local_result;
+ *local_result = INVALID_CURRENT;
+ INIT_LIST_HEAD(&new->list);
+ spin_lock(&di->inst_curr_lock);
+ list_add(&new->list, di->next_result_list);
+ cpw_my_wq = di->cpw_next_wq;
+ add_wait_queue(cpw_my_wq, &wait);
+ /* queue_work may schedule */
+ queue_delayed_work(di->inst_curr_wq, &di->fg_inst_curr_work, 1);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&di->inst_curr_lock);
+ schedule();
+ finish_wait(cpw_my_wq, &wait);
+ return 0;
+}
+
+/**
+ * ab8500_fg_inst_curr_blocking() - battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery instantaneous current (on success) else error code
+ */
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
+{
+ DEFINE_WAIT(wait);
+
+ /* caller will wait for the next available result */
+ spin_lock(&di->inst_curr_lock);
+ add_wait_queue(&di->result_wq, &wait);
+ if (!di->inst_curr_mip)
+ /* queue_work may schedule */
+ queue_delayed_work(di->inst_curr_wq, &di->fg_inst_curr_work, 1);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&di->inst_curr_lock);
+ schedule();
+ finish_wait(&di->result_wq, &wait);
+ return di->inst_curr_result;
+}
+
+/**
+ * ab8500_fg_inst_curr_work() - take an 'instant' battery current reading
+ * @work: pointer to the work_struct structure
+ *
+ * AB8500 does not provide instant current readings, the best we can do is
+ * average over 250ms.
+ */
+static void ab8500_fg_inst_curr_work(struct work_struct *work)
+{
+ struct list_head *temp_result_list;
+ wait_queue_head_t *cpw_temp_wq;
+ u8 low, high, reg_val;
+ static int val;
+ int ret = 0;
+ bool fg_off = false;
+
+ struct ab8500_fg *di = container_of(work,
+ struct ab8500_fg, fg_inst_curr_work.work);
+
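+	/*
+	 * Swap the logical 'next' and 'this' wait queues and result lists,
+	 * so that clients arriving from now on are queued for the next
+	 * measurement while this run serves the clients already queued.
+	 */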
+ spin_lock(&di->inst_curr_lock);
+ temp_result_list = di->next_result_list;
+ cpw_temp_wq = di->cpw_next_wq;
+ di->next_result_list = di->this_result_list;
+ di->cpw_next_wq = di->cpw_this_wq;
+ di->this_result_list = temp_result_list;
+ di->cpw_this_wq = cpw_temp_wq;
+
+ di->inst_curr_mip = true;
+ spin_unlock(&di->inst_curr_lock);
+
+ mutex_lock(&di->cc_lock);
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, &reg_val);
+ if (ret < 0)
+ goto inst_curr_err1;
+
+ if (!(reg_val & CC_PWR_UP_ENA)) {
+ dev_dbg(di->dev, "%s Enable FG\n", __func__);
+ fg_off = true;
+
+ /* Program the samples */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+ SEC_TO_SAMPLE(10));
+ if (ret)
+ goto inst_curr_err1;
+
+ /* Start the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG,
+ (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+ if (ret)
+ goto inst_curr_err1;
+ }
+
+ /* Reset counter and Read request */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_CTRL_REG, (RESET_ACCU | READ_REQ));
+ if (ret)
+ goto inst_curr_err1;
+
+ wake_up(di->cpw_this_wq);
+
+	/*
+	 * Since there is no interrupt for this, just wait for 250ms.
+	 * 250ms is one sample conversion time with the 32.768 kHz RTC clock.
+	 */
+ msleep(250);
+
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_SMPL_CNVL_REG, &low);
+ if (ret < 0)
+ goto inst_curr_err2;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_SMPL_CNVH_REG, &high);
+ if (ret < 0)
+ goto inst_curr_err2;
+
+	/*
+	 * A negative value means discharging; sign-extend the 13-bit
+	 * two's complement sample (bit 12 is the sign bit).
+	 */
+ if (high & 0x10)
+ val = (low | (high << 8) | 0xFFFFE000);
+ else
+ val = (low | (high << 8));
+
+	/*
+	 * Convert to a current in mA. The full scale input voltage is
+	 * 66.660mV, so one LSB = 66.660mV / (4096 * res), where the sense
+	 * resistance res is given in mOhm. For example, a 10 mOhm sense
+	 * resistor gives 1.627mA per LSB (cf. FG_LSB_IN_MA).
+	 */
+ val = ((val * 66660) / (4096 * di->bat->fg_res));
+
+ if (fg_off) {
+ dev_dbg(di->dev, "%s Disable FG\n", __func__);
+
+ /* Clear any pending read requests */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ if (ret)
+ goto inst_curr_err3;
+
+ /* Stop the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0);
+ if (ret)
+ goto inst_curr_err3;
+ }
+
+finished:
+ mutex_unlock(&di->cc_lock);
+
+ spin_lock(&di->inst_curr_lock);
+ di->inst_curr_result = val;
+
+ while (!list_empty(di->this_result_list)) {
+ struct inst_curr_result_list *this = list_first_entry(
+ di->this_result_list,
+ struct inst_curr_result_list,
+ list);
+ *(this->result) = val;
+ list_del(&this->list);
+ kfree(this);
+ }
+ di->inst_curr_mip = false;
+ wake_up(&di->result_wq);
+ spin_unlock(&di->inst_curr_lock);
+ return;
+
+inst_curr_err1:
+ wake_up(di->cpw_this_wq);
+inst_curr_err2:
+ val = 0;
+inst_curr_err3:
+	dev_err(di->dev, "%s Get instant current failed\n", __func__);
+ goto finished;
+}
+
+/**
+ * ab8500_fg_acc_cur_work() - average battery current
+ * @work: pointer to the work_struct structure
+ *
+ * Updates the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab8500_fg_acc_cur_work(struct work_struct *work)
+{
+ int val;
+ int ret;
+ u8 low, med, high;
+
+ struct ab8500_fg *di = container_of(work,
+ struct ab8500_fg, fg_acc_cur_work);
+
+ mutex_lock(&di->cc_lock);
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_CTRL, RD_NCONV_ACCU_REQ);
+ if (ret)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_LOW, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_MED, &med);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_HIGH, &high);
+ if (ret < 0)
+ goto exit;
+
+	/* Check the sign bit (bit 20) and sign-extend the 21-bit two's complement value */
+ if (high & 0x10)
+ val = (low | (med << 8) | (high << 16) | 0xFFE00000);
+ else
+ val = (low | (med << 8) | (high << 16));
+
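+	/*
+	 * One accumulated LSB corresponds to 112.9 nAh
+	 * (QLSB_NANO_AMP_HOURS_X10 holds the value in nAh times 10) and to
+	 * roughly 1.627 mA per sample (FG_LSB_IN_MA / 1000).
+	 */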
+ di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10)/10000;
+
+ di->avg_curr = (val * FG_LSB_IN_MA) / (di->fg_samples * 1000);
+ di->flags.conv_done = true;
+
+ mutex_unlock(&di->cc_lock);
+
+ queue_work(di->fg_wq, &di->fg_work);
+
+ return;
+exit:
+ dev_err(di->dev,
+ "Failed to read or write gas gauge registers\n");
+ mutex_unlock(&di->cc_lock);
+ queue_work(di->fg_wq, &di->fg_work);
+}
+
+/**
+ * ab8500_fg_bat_voltage() - get battery voltage
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns the battery voltage in mV. If the GPADC conversion fails, the
+ * previously read value is returned.
+ */
+static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
+{
+ int vbat;
+ static int prev;
+
+ vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+ if (vbat < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+
+ prev = vbat;
+ return vbat;
+}
+
+/**
+ * ab8500_fg_volt_to_capacity() - Voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ * @voltage: The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
+{
+ int i, tbl_size;
+ struct v_to_cap *tbl;
+ int cap = 0;
+
+	tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (voltage > tbl[i].voltage)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ cap = interpolate(voltage,
+ tbl[i].voltage,
+ tbl[i].capacity * 10,
+ tbl[i-1].voltage,
+ tbl[i-1].capacity * 10);
+ } else if (i == 0) {
+ cap = 1000;
+ } else {
+ cap = 0;
+ }
+
+ dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille",
+ __func__, voltage, cap);
+
+ return cap;
+}
+
+/**
+ * ab8500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
+{
+ di->vbat = ab8500_fg_bat_voltage(di);
+ return ab8500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab8500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
+{
+ int vbat_comp;
+
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ di->vbat = ab8500_fg_bat_voltage(di);
+
+ /* Use Ohms law to get the load compensated voltage */
+ vbat_comp = di->vbat - (di->inst_curr *
+ di->bat->bat_type[di->bat->batt_id].battery_resistance) / 1000;
+
+	dev_dbg(di->dev, "%s Measured Vbat: %dmV, Compensated Vbat %dmV, "
+ "R: %dmOhm, Current: %dmA\n",
+ __func__,
+ di->vbat,
+ vbat_comp,
+ di->bat->bat_type[di->bat->batt_id].battery_resistance,
+ di->inst_curr);
+
+ return ab8500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab8500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di: pointer to the ab8500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab8500_fg_convert_mah_to_permille(struct ab8500_fg *di, int cap_mah)
+{
+ return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
+
+/**
+ * ab8500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di: pointer to the ab8500_fg structure
+ * @cap_pm: capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab8500_fg_convert_permille_to_mah(struct ab8500_fg *di, int cap_pm)
+{
+ return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
+
+/**
+ * ab8500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di: pointer to the ab8500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab8500_fg_convert_mah_to_uwh(struct ab8500_fg *di, int cap_mah)
+{
+ u64 div_res;
+ u32 div_rem;
+
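+	/* Energy = capacity * nominal voltage / 1000, rounded to the nearest integer */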
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+ div_rem = do_div(div_res, 1000);
+
+ /* Make sure to round upwards if necessary */
+ if (div_rem >= 1000 / 2)
+ div_res++;
+
+ return (int) div_res;
+}
+
+/**
+ * ab8500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. The filter is filled with this capacity
+ */
+static int ab8500_fg_calc_cap_charging(struct ab8500_fg *di)
+{
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ /*
+ * We force capacity to 100% as long as the algorithm
+ * reports that it's full.
+ */
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+ di->flags.fully_charged)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ /* We need to update battery voltage and inst current when charging */
+ di->vbat = ab8500_fg_bat_voltage(di);
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di: pointer to the ab8500_fg structure
+ * @comp: if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_voltage(struct ab8500_fg *di, bool comp)
+{
+ int permille, mah;
+
+ if (comp)
+ permille = ab8500_fg_load_comp_volt_to_capacity(di);
+ else
+ permille = ab8500_fg_uncomp_volt_to_capacity(di);
+
+ mah = ab8500_fg_convert_permille_to_mah(di, permille);
+
+ di->bat_cap.mah = ab8500_fg_add_cap_sample(di, mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_fg(struct ab8500_fg *di)
+{
+ int permille_volt, permille;
+
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ /*
+ * Check against voltage based capacity. It can not be lower
+ * than what the uncompensated voltage says
+ */
+ permille = ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ permille_volt = ab8500_fg_uncomp_volt_to_capacity(di);
+
+ if (permille < permille_volt) {
+ di->bat_cap.permille = permille_volt;
+ di->bat_cap.mah = ab8500_fg_convert_permille_to_mah(di,
+ di->bat_cap.permille);
+
+ dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+ __func__,
+ permille,
+ permille_volt);
+
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ } else {
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ }
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_capacity_level() - Get the battery capacity level
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab8500_fg_capacity_level(struct ab8500_fg *di)
+{
+ int ret, percent;
+
+ percent = di->bat_cap.permille / 10;
+
+ if (percent <= di->bat->cap_levels->critical ||
+ di->flags.low_bat)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (percent <= di->bat->cap_levels->low)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (percent <= di->bat->cap_levels->normal)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (percent <= di->bat->cap_levels->high)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ return ret;
+}
+
+/**
+ * ab8500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di: pointer to the ab8500_fg structure
+ * @init: capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
+{
+ bool changed = false;
+
+ di->bat_cap.level = ab8500_fg_capacity_level(di);
+
+ if (di->bat_cap.level != di->bat_cap.prev_level) {
+ /*
+ * We do not allow reported capacity level to go up
+ * unless we're charging or if we're in init
+ */
+ if (!(!di->flags.charging && di->bat_cap.level >
+ di->bat_cap.prev_level) || init) {
+ dev_dbg(di->dev, "level changed from %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ di->bat_cap.prev_level = di->bat_cap.level;
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "level not allowed to go up "
+ "since no charger is connected: %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ }
+ }
+
+ /*
+ * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+ * shutdown
+ */
+ if (di->flags.low_bat) {
+ dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+ di->bat_cap.prev_percent = 0;
+ di->bat_cap.permille = 0;
+ di->bat_cap.prev_mah = 0;
+ di->bat_cap.mah = 0;
+ changed = true;
+ } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+ if (di->bat_cap.permille / 10 == 0) {
+ /*
+ * We will not report 0% unless we've got
+ * the LOW_BAT IRQ, no matter what the FG
+ * algorithm says.
+ */
+ di->bat_cap.prev_percent = 1;
+ di->bat_cap.permille = 1;
+ di->bat_cap.prev_mah = 1;
+ di->bat_cap.mah = 1;
+
+ changed = true;
+ } else if (!(!di->flags.charging &&
+ (di->bat_cap.permille / 10) >
+ di->bat_cap.prev_percent) || init) {
+ /*
+ * We do not allow reported capacity to go up
+ * unless we're charging or if we're in init
+ */
+ dev_dbg(di->dev,
+ "capacity changed from %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "capacity not allowed to go up since "
+ "no charger is connected: %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ }
+ }
+
+ if (changed) {
+ power_supply_changed(&di->fg_psy);
+ if (di->flags.fully_charged) {
+ dev_dbg(di->dev, "Full, notifying..: %d\n",
+ di->flags.fully_charged);
+ sysfs_notify(&di->fg_kobject, NULL, "charge_full");
+ }
+ }
+}
+
+static void ab8500_fg_charge_state_to(struct ab8500_fg *di,
+ enum ab8500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+ di->charge_state,
+ charge_state[di->charge_state],
+ new_state,
+ charge_state[new_state]);
+
+ di->charge_state = new_state;
+}
+
+static void ab8500_fg_discharge_state_to(struct ab8500_fg *di,
+ enum ab8500_fg_charge_state new_state)
+{
+	dev_dbg(di->dev, "Discharge state from %d [%s] to %d [%s]\n",
+ di->discharge_state,
+ discharge_state[di->discharge_state],
+ new_state,
+ discharge_state[new_state]);
+
+ di->discharge_state = new_state;
+}
+
+/**
+ * ab8500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
+{
+ /*
+ * If we change to discharge mode
+ * we should start with recovery
+ */
+ if (di->discharge_state != AB8500_FG_DISCHARGE_INIT_RECOVERY)
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_INIT_RECOVERY);
+
+ switch (di->charge_state) {
+ case AB8500_FG_CHARGE_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_charging);
+
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
+
+ break;
+
+ case AB8500_FG_CHARGE_READOUT:
+ /*
+ * Read the FG and calculate the new capacity
+ */
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ ab8500_fg_calc_cap_charging(di);
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Check capacity limits */
+ ab8500_fg_check_capacity_limits(di, false);
+}
+
+/**
+ * ab8500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
+{
+ int sleep_time;
+
+ /* If we change to charge mode we should start with init */
+ if (di->charge_state != AB8500_FG_CHARGE_INIT)
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+
+ switch (di->discharge_state) {
+ case AB8500_FG_DISCHARGE_INIT:
+ /* We use the FG IRQ to work on */
+ di->init_cnt = 0;
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_INITMEASURING);
+
+ /* Intentional fallthrough */
+ case AB8500_FG_DISCHARGE_INITMEASURING:
+ /*
+ * Discard a number of samples during startup.
+ * After that, use compensated voltage for a few
+ * samples to get an initial capacity.
+ * Then go to READOUT
+ */
+ sleep_time = di->bat->fg_params->init_timer;
+
+ /* Discard the first [x] seconds */
+ if (di->init_cnt >
+ di->bat->fg_params->init_discard_time) {
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+ ab8500_fg_check_capacity_limits(di, true);
+ }
+
+ di->init_cnt += sleep_time;
+ if (di->init_cnt >
+ di->bat->fg_params->init_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB8500_FG_DISCHARGE_INIT_RECOVERY:
+ di->recovery_cnt = 0;
+ di->recovery_needed = true;
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_RECOVERY);
+
+ /* Intentional fallthrough */
+
+ case AB8500_FG_DISCHARGE_RECOVERY:
+ sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+ /*
+ * We should check the power consumption
+ * If low, go to READOUT (after x min) or
+ * RECOVERY_SLEEP if time left.
+ * If high, go to READOUT
+ */
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ if (di->recovery_cnt >
+ di->bat->fg_params->recovery_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ di->recovery_needed = false;
+ } else {
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ sleep_time * HZ);
+ }
+ di->recovery_cnt += sleep_time;
+ } else {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB8500_FG_DISCHARGE_READOUT:
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ /* Detect mode change */
+ if (di->high_curr_mode) {
+ di->high_curr_mode = false;
+ di->high_curr_cnt = 0;
+ }
+
+ if (di->recovery_needed) {
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_RECOVERY);
+
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work, 0);
+
+ break;
+ }
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ } else {
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ /* Detect mode change */
+ if (!di->high_curr_mode) {
+ di->high_curr_mode = true;
+ di->high_curr_cnt = 0;
+ }
+
+ di->high_curr_cnt +=
+ di->bat->fg_params->accu_high_curr;
+ if (di->high_curr_cnt >
+ di->bat->fg_params->high_curr_time)
+ di->recovery_needed = true;
+
+ ab8500_fg_calc_cap_discharge_fg(di);
+ }
+
+ ab8500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ case AB8500_FG_DISCHARGE_WAKEUP:
+ ab8500_fg_coulomb_counter(di, true);
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ /* Re-program number of samples set above */
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_READOUT);
+
+ ab8500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab8500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di: pointer to the ab8500_fg structure
+ *
+ */
+static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
+{
+ int ret;
+
+ switch (di->calib_state) {
+ case AB8500_FG_CALIB_INIT:
+ dev_dbg(di->dev, "Calibration ongoing...\n");
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_INT_CAL_N_AVG_MASK, CC_INT_CAL_SAMPLES_8);
+ if (ret < 0)
+ goto err;
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_INTAVGOFFSET_ENA, CC_INTAVGOFFSET_ENA);
+ if (ret < 0)
+ goto err;
+ di->calib_state = AB8500_FG_CALIB_WAIT;
+ break;
+ case AB8500_FG_CALIB_END:
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_MUXOFFSET, CC_MUXOFFSET);
+ if (ret < 0)
+ goto err;
+ di->flags.calibrate = false;
+ dev_dbg(di->dev, "Calibration done...\n");
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ break;
+ case AB8500_FG_CALIB_WAIT:
+ dev_dbg(di->dev, "Calibration WFI\n");
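+		/* Intentional fallthrough */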
+ default:
+ break;
+ }
+ return;
+err:
+ /* Something went wrong, don't calibrate then */
+ dev_err(di->dev, "failed to calibrate the CC\n");
+ di->flags.calibrate = false;
+ di->calib_state = AB8500_FG_CALIB_INIT;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab8500_fg_algorithm() - Entry point for the FG algorithm
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab8500_fg_algorithm(struct ab8500_fg *di)
+{
+ if (di->flags.calibrate)
+ ab8500_fg_algorithm_calibrate(di);
+ else {
+ if (di->flags.charging)
+ ab8500_fg_algorithm_charging(di);
+ else
+ ab8500_fg_algorithm_discharging(di);
+ }
+
+ dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+ "%d %d %d %d %d %d %d\n",
+ di->bat_cap.max_mah_design,
+ di->bat_cap.mah,
+ di->bat_cap.permille,
+ di->bat_cap.level,
+ di->bat_cap.prev_mah,
+ di->bat_cap.prev_percent,
+ di->bat_cap.prev_level,
+ di->vbat,
+ di->inst_curr,
+ di->avg_curr,
+ di->accu_charge,
+ di->flags.charging,
+ di->charge_state,
+ di->discharge_state,
+ di->high_curr_mode,
+ di->recovery_needed);
+}
+
+/**
+ * ab8500_fg_periodic_work() - Run the FG state machine periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab8500_fg_periodic_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_periodic_work.work);
+
+ if (di->init_capacity) {
+ /* A dummy read that will return 0 */
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ /* Get an initial capacity calculation */
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ ab8500_fg_check_capacity_limits(di, true);
+ di->init_capacity = false;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else
+ ab8500_fg_algorithm(di);
+}
+
+/**
+ * ab8500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab8500_fg_low_bat_work(struct work_struct *work)
+{
+ int vbat;
+
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_low_bat_work.work);
+
+ vbat = ab8500_fg_bat_voltage(di);
+
+ /* Check if LOW_BAT still fulfilled */
+ if (vbat < di->bat->fg_params->lowbat_threshold) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+
+ /*
+ * We need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ } else {
+ di->flags.low_bat = false;
+ dev_warn(di->dev, "Battery voltage OK again\n");
+ }
+
+ /* This is needed to dispatch LOW_BAT */
+ ab8500_fg_check_capacity_limits(di, false);
+
+ /* Set this flag to check if LOW_BAT IRQ still occurs */
+ di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab8500_fg_instant_work() - Run the FG state machine instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab8500_fg_instant_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work);
+
+ ab8500_fg_algorithm(di);
+}
+
+/**
+ * ab8500_fg_cc_int_calib_handler() - isr to handle end of CC internal calibration.
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_int_calib_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+ di->calib_state = AB8500_FG_CALIB_END;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_cc_convend_handler() - isr to get battery avg current.
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_convend_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ queue_work(di->fg_wq, &di->fg_acc_cur_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_batt_ovv_handler() - Battery OVV occurred
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ dev_dbg(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
+
+ power_supply_changed(&di->fg_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_lowbatf_handler() - Battery voltage is below LOW threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ if (!di->flags.low_bat_delay) {
+ dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+ di->flags.low_bat_delay = true;
+ /*
+ * Start a timer to check LOW_BAT again after some time
+ * This is done to avoid shutdown on single voltage dips
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_get_property() - get the fg properties
+ * @psy: pointer to the power_supply structure
+ * @psp:	the power supply property being requested
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now: battery voltage
+ * current_now: battery instant current
+ * current_avg: battery average current
+ * charge_full_design: capacity where battery is considered full
+ * charge_now: battery capacity in nAh
+ * capacity: capacity in percent
+ * capacity_level: capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_fg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_fg *di;
+
+ di = to_ab8500_fg_device_info(psy);
+
+ /*
+ * If battery is identified as unknown and charging of unknown
+ * batteries is disabled, we always report 100% capacity and
+ * capacity level UNKNOWN, since we can't calculate
+ * remaining capacity
+ */
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (di->flags.bat_ovv)
+ val->intval = 47500000;
+ else
+ val->intval = di->vbat * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = di->inst_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = di->avg_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah_design);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ else
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.prev_mah);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = di->bat_cap.max_mah_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = di->bat_cap.max_mah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = di->bat_cap.max_mah;
+ else
+ val->intval = di->bat_cap.prev_mah;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = 100;
+ else
+ val->intval = di->bat_cap.prev_percent;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else
+ val->intval = di->bat_cap.prev_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
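+/*
+ * ab8500_fg_get_ext_psy_data() - class_for_each_device() callback that picks
+ * up status and technology changes from the power supplies that list this
+ * driver in their supplied_to array.
+ */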
+static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_fg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_fg_device_info(psy);
+
+ /*
+ * For all psy where the name of your driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ switch (ret.intval) {
+ case POWER_SUPPLY_STATUS_UNKNOWN:
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (!di->flags.charging)
+ break;
+ di->flags.charging = false;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (di->flags.fully_charged)
+ break;
+ di->flags.fully_charged = true;
+ /* Save current capacity as maximum */
+ di->bat_cap.max_mah = di->bat_cap.mah;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ if (di->flags.charging)
+ break;
+ di->flags.charging = true;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ };
+ default:
+ break;
+ };
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->flags.batt_unknown = false;
+ else
+ di->flags.batt_unknown = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_fg_init_hw_registers() - Set up FG related registers
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
+{
+ int ret;
+
+ /* Set up VBAT OVV register */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_BATT_OVV,
+ (BATT_OVV_ENA | BATT_OVV_TH_4P75));
+ if (ret) {
+ dev_err(di->dev, "failed to set BATT_OVV\n");
+ goto out;
+ }
+
+ /* Low Battery Voltage */
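+	/* The threshold value is shifted up one bit to make room for the enable bit */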
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_LOW_BAT_REG,
+ ab8500_volt_to_regval(
+ di->bat->fg_params->lowbat_threshold) << 1 |
+ LOW_BAT_ENABLE);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * ab8500_fg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the entry point of the pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab8500_fg_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_fg *di = to_ab8500_fg_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->fg_psy, ab8500_fg_get_ext_psy_data);
+}
+
+/**
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Used to reset the current battery capacity to be able to
+ * retrigger a new voltage based capacity calculation. For
+ * test and verification purposes.
+ */
+static void ab8500_fg_reinit_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_reinit_work.work);
+
+ if (di->flags.calibrate == false) {
+ dev_dbg(di->dev, "Resetting FG state machine to init.\n");
+ ab8500_fg_clear_cap_samples(di);
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ } else {
+ dev_err(di->dev, "Residual offset calibration ongoing, "
+ "retrying...\n");
+ /* Wait one second until next try */
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
+ round_jiffies(1));
+ }
+}
+
+/**
+ * ab8500_fg_reinit() - forces FG algorithm to reinitialize with current values
+ *
+ * This function can be used to force the FG algorithm to recalculate a new
+ * voltage based battery capacity.
+ */
+void ab8500_fg_reinit(void)
+{
+ struct ab8500_fg *di = ab8500_fg_get();
+ /* Silently ignore the request if the FG driver is not initialized */
+ if (di != NULL)
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
+}
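
As a usage sketch (editorial, not part of this patch): ab8500_fg_reinit() silently does nothing when the driver has not been probed, so platform code can call it unconditionally. The declaration is assumed to come from the ab8500 battery-management header.

    /*
     * Hypothetical caller: force a new voltage-based capacity
     * calculation, e.g. after a battery swap has been detected.
     * ab8500_fg_reinit() queues fg_reinit_work and returns
     * immediately; it is a no-op if the FG driver is not loaded.
     */
    static void example_battery_swap_notify(void)
    {
            ab8500_fg_reinit();
    }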
+
+/* Exposure to the sysfs interface */
+
+struct ab8500_fg_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct ab8500_fg *, char *);
+ ssize_t (*store)(struct ab8500_fg *, const char *, size_t);
+};
+
+static ssize_t charge_full_show(struct ab8500_fg *di, char *buf)
+{
+ return sprintf(buf, "%d\n", di->bat_cap.max_mah);
+}
+
+static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
+ size_t count)
+{
+ unsigned long charge_full;
+ ssize_t ret = -EINVAL;
+
+ ret = strict_strtoul(buf, 10, &charge_full);
+
+ dev_dbg(di->dev, "Ret %zd charge_full %lu\n", ret, charge_full);
+
+ if (!ret) {
+ di->bat_cap.max_mah = (int) charge_full;
+ ret = count;
+ }
+ return ret;
+}
+static struct ab8500_fg_sysfs_entry charge_full_attr =
+ __ATTR(charge_full, 0644, charge_full_show, charge_full_store);
+
+static ssize_t
+ab8500_fg_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct ab8500_fg_sysfs_entry *entry;
+ struct ab8500_fg *di;
+
+ entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+ di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(di, buf);
+}
+static ssize_t
+ab8500_fg_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ struct ab8500_fg_sysfs_entry *entry;
+ struct ab8500_fg *di;
+
+ entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+ di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(di, buf, count);
+}
+
+const struct sysfs_ops ab8500_fg_sysfs_ops = {
+ .show = ab8500_fg_show,
+ .store = ab8500_fg_store,
+};
+
+static struct attribute *ab8500_fg_attrs[] = {
+ &charge_full_attr.attr,
+ NULL,
+};
+
+static struct kobj_type ab8500_fg_ktype = {
+ .sysfs_ops = &ab8500_fg_sysfs_ops,
+ .default_attrs = ab8500_fg_attrs,
+};
+
+/**
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct ab8500_fg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
+{
+ kobject_del(&di->fg_kobject);
+}
+
+/**
+ * ab8500_fg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct ab8500_fg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_fg_sysfs_init(struct ab8500_fg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->fg_kobject,
+ &ab8500_fg_ktype,
+ NULL, "ab8500_fg");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
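
Because kobject_init_and_add() above is called with a NULL parent, the charge_full attribute is expected to appear near the top of sysfs; the exact path below is therefore an assumption. A minimal userspace sketch for reading and overriding the learned full-charge capacity (the value is in uAh despite the field name, given the MILLI_TO_MICRO scaling in probe):

    #include <stdio.h>

    int main(void)
    {
            /* Path is an assumption based on the NULL kobject parent */
            FILE *f = fopen("/sys/ab8500_fg/charge_full", "r+");
            int max_uah;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &max_uah) == 1)
                    printf("charge_full: %d uAh\n", max_uah);
            rewind(f);                      /* required between read and write */
            fprintf(f, "%d\n", 1500000);    /* hypothetical new capacity */
            fclose(f);
            return 0;
    }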
+
+#if defined(CONFIG_PM)
+static int ab8500_fg_resume(struct platform_device *pdev)
+{
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ /*
+ * Change state if we're not charging. If we're charging we will wake
+ * up on the FG IRQ
+ */
+ if (!di->flags.charging) {
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_WAKEUP);
+ queue_work(di->fg_wq, &di->fg_work);
+ }
+
+ return 0;
+}
+
+static int ab8500_fg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ flush_delayed_work(&di->fg_periodic_work);
+
+ /*
+ * If the FG is enabled we will disable it before going to suspend
+ * only if we're not charging
+ */
+ if (di->flags.fg_enabled && !di->flags.charging)
+ ab8500_fg_coulomb_counter(di, false);
+
+ return 0;
+}
+#else
+#define ab8500_fg_suspend NULL
+#define ab8500_fg_resume NULL
+#endif
+
+static int __devexit ab8500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ list_del(&di->node);
+
+ /* Disable coulomb counter */
+ ret = ab8500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(di->dev, "failed to disable coulomb counter\n");
+
+ flush_workqueue(di->inst_curr_wq);
+ destroy_workqueue(di->inst_curr_wq);
+ destroy_workqueue(di->fg_wq);
+ ab8500_fg_sysfs_exit(di);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->fg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+ return ret;
+}
+
+/* ab8500 fg driver interrupts and their respective isr */
+static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
+ {"NCONV_ACCU", ab8500_fg_cc_convend_handler},
+ {"BATT_OVV", ab8500_fg_batt_ovv_handler},
+ {"LOW_BAT_F", ab8500_fg_lowbatf_handler},
+ {"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
+};
+
+static int __devinit ab8500_fg_probe(struct platform_device *pdev)
+{
+ int i, irq;
+ struct ab8500_platform_data *plat;
+ int ret = 0;
+
+ struct ab8500_fg *di =
+ kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get();
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get fg specific platform data */
+ if (!plat->fg) {
+ dev_err(di->dev, "no fg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->fg;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ di->fg_psy.name = "ab8500_fg";
+ di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->fg_psy.properties = ab8500_fg_props;
+ di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
+ di->fg_psy.get_property = ab8500_fg_get_property;
+ di->fg_psy.supplied_to = di->pdata->supplied_to;
+ di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
+
+ di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+ di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+ di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+ di->init_capacity = true;
+
+ di->inst_curr_mip = false;
+ spin_lock_init(&di->inst_curr_lock);
+ init_waitqueue_head(&di->result_wq);
+ init_waitqueue_head(&di->cpw_a_wq);
+ init_waitqueue_head(&di->cpw_b_wq);
+ di->cpw_next_wq = &di->cpw_a_wq;
+ di->cpw_this_wq = &di->cpw_b_wq;
+ di->inst_curr_result = 0;
+ INIT_LIST_HEAD(&di->result_a_list);
+ INIT_LIST_HEAD(&di->result_b_list);
+ di->next_result_list = &di->result_a_list;
+ di->this_result_list = &di->result_b_list;
+
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
+ if (di->fg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ di->inst_curr_wq = create_singlethread_workqueue("ab8500_inst_curr_wq");
+ if (di->inst_curr_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_fg_wq;
+ }
+
+ /* Init work for running the instant current measurement */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_inst_curr_work,
+ ab8500_fg_inst_curr_work);
+
+ /* Init work for running the fg algorithm instantly */
+ INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
+
+ /* Init work for getting the battery accumulated current */
+ INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
+
+ /* Init work for reinitialising the fg algorithm */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+ ab8500_fg_reinit_work);
+
+ /* Work delayed Queue to run the state machine */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ ab8500_fg_periodic_work);
+
+ /* Work to check low battery condition */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ ab8500_fg_low_bat_work);
+
+ /* Initialize OVV, and other registers */
+ ret = ab8500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize registers\n");
+ goto free_inst_curr_wq;
+ }
+
+ /* Consider battery unknown until we're informed otherwise */
+ di->flags.batt_unknown = true;
+
+ /* Register FG power supply class */
+ ret = power_supply_register(di->dev, &di->fg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register FG psy\n");
+ goto free_inst_curr_wq;
+ }
+
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab8500_fg_coulomb_counter(di, true);
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_fg_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ ret = ab8500_fg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_irq;
+ }
+
+ /* Calibrate the fg first time */
+ di->flags.calibrate = true;
+ di->calib_state = AB8500_FG_CALIB_INIT;
+
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ list_add_tail(&di->node, &ab8500_fg_list);
+
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->fg_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+ free_irq(irq, di);
+ }
+free_inst_curr_wq:
+ destroy_workqueue(di->inst_curr_wq);
+free_fg_wq:
+ destroy_workqueue(di->fg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
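
For context, a board file is expected to hand this driver its supplied_to list through the ab8500 platform data referenced in probe (plat->fg and di->pdata->supplied_to above). A heavily hedged sketch; the structure name and anything not referenced in this hunk are assumptions, not the real platform-data definition:

    /* Hypothetical board-side wiring for the fuel gauge psy */
    static char *example_ab8500_fg_supplied_to[] = {
            "abx500_chargalg",      /* hypothetical consumer name */
    };

    static struct ab8500_fg_platform_data example_fg_pdata = {
            /* only fields referenced by probe are shown */
            .supplied_to     = example_ab8500_fg_supplied_to,
            .num_supplicants = ARRAY_SIZE(example_ab8500_fg_supplied_to),
    };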
+
+static struct platform_driver ab8500_fg_driver = {
+ .probe = ab8500_fg_probe,
+ .remove = __devexit_p(ab8500_fg_remove),
+ .suspend = ab8500_fg_suspend,
+ .resume = ab8500_fg_resume,
+ .driver = {
+ .name = "ab8500-fg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_fg_init(void)
+{
+ return platform_driver_register(&ab8500_fg_driver);
+}
+
+static void __exit ab8500_fg_exit(void)
+{
+ platform_driver_unregister(&ab8500_fg_driver);
+}
+
+subsys_initcall_sync(ab8500_fg_init);
+module_exit(ab8500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-fg");
+MODULE_DESCRIPTION("AB8500 Fuel Gauge driver");
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
new file mode 100644
index 00000000000..bb0fa42b109
--- /dev/null
+++ b/drivers/power/abx500_chargalg.c
@@ -0,0 +1,1920 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Charging algorithm driver for abx500 variants
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL (6 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT 10
+
+/* Recharge criteria counter */
+#define RCH_COND_CNT 3
+
+#define to_abx500_chargalg_device_info(x) container_of((x), \
+ struct abx500_chargalg, chargalg_psy)
+
+enum abx500_chargers {
+ NO_CHG,
+ AC_CHG,
+ USB_CHG,
+};
+
+struct abx500_chargalg_charger_info {
+ enum abx500_chargers conn_chg;
+ enum abx500_chargers prev_conn_chg;
+ enum abx500_chargers online_chg;
+ enum abx500_chargers prev_online_chg;
+ enum abx500_chargers charger_type;
+ bool usb_chg_ok;
+ bool ac_chg_ok;
+ int usb_volt;
+ int usb_curr;
+ int ac_volt;
+ int ac_curr;
+ int usb_vset;
+ int usb_iset;
+ int ac_vset;
+ int ac_iset;
+};
+
+struct abx500_chargalg_suspension_status {
+ bool suspended_change;
+ bool ac_suspended;
+ bool usb_suspended;
+};
+
+struct abx500_chargalg_battery_data {
+ int temp;
+ int volt;
+ int avg_curr;
+ int inst_curr;
+ int percent;
+};
+
+enum abx500_chargalg_states {
+ STATE_HANDHELD_INIT,
+ STATE_HANDHELD,
+ STATE_CHG_NOT_OK_INIT,
+ STATE_CHG_NOT_OK,
+ STATE_HW_TEMP_PROTECT_INIT,
+ STATE_HW_TEMP_PROTECT,
+ STATE_NORMAL_INIT,
+ STATE_NORMAL,
+ STATE_WAIT_FOR_RECHARGE_INIT,
+ STATE_WAIT_FOR_RECHARGE,
+ STATE_MAINTENANCE_A_INIT,
+ STATE_MAINTENANCE_A,
+ STATE_MAINTENANCE_B_INIT,
+ STATE_MAINTENANCE_B,
+ STATE_TEMP_UNDEROVER_INIT,
+ STATE_TEMP_UNDEROVER,
+ STATE_TEMP_LOWHIGH_INIT,
+ STATE_TEMP_LOWHIGH,
+ STATE_SUSPENDED_INIT,
+ STATE_SUSPENDED,
+ STATE_OVV_PROTECT_INIT,
+ STATE_OVV_PROTECT,
+ STATE_SAFETY_TIMER_EXPIRED_INIT,
+ STATE_SAFETY_TIMER_EXPIRED,
+ STATE_BATT_REMOVED_INIT,
+ STATE_BATT_REMOVED,
+ STATE_WD_EXPIRED_INIT,
+ STATE_WD_EXPIRED,
+};
+
+static const char *states[] = {
+ "HANDHELD_INIT",
+ "HANDHELD",
+ "CHG_NOT_OK_INIT",
+ "CHG_NOT_OK",
+ "HW_TEMP_PROTECT_INIT",
+ "HW_TEMP_PROTECT",
+ "NORMAL_INIT",
+ "NORMAL",
+ "WAIT_FOR_RECHARGE_INIT",
+ "WAIT_FOR_RECHARGE",
+ "MAINTENANCE_A_INIT",
+ "MAINTENANCE_A",
+ "MAINTENANCE_B_INIT",
+ "MAINTENANCE_B",
+ "TEMP_UNDEROVER_INIT",
+ "TEMP_UNDEROVER",
+ "TEMP_LOWHIGH_INIT",
+ "TEMP_LOWHIGH",
+ "SUSPENDED_INIT",
+ "SUSPENDED",
+ "OVV_PROTECT_INIT",
+ "OVV_PROTECT",
+ "SAFETY_TIMER_EXPIRED_INIT",
+ "SAFETY_TIMER_EXPIRED",
+ "BATT_REMOVED_INIT",
+ "BATT_REMOVED",
+ "WD_EXPIRED_INIT",
+ "WD_EXPIRED",
+};
+
+struct abx500_chargalg_events {
+ bool batt_unknown;
+ bool mainextchnotok;
+ bool batt_ovv;
+ bool batt_rem;
+ bool btemp_underover;
+ bool btemp_lowhigh;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool main_ovv;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool safety_timer_expired;
+ bool maintenance_timer_expired;
+ bool ac_wd_expired;
+ bool usb_wd_expired;
+ bool ac_cv_active;
+ bool usb_cv_active;
+ bool vbus_collapsed;
+};
+
+/**
+ * struct abx500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset: the non optimized/maximised charger current
+ * @current_iset: the charging current used at this moment
+ * @test_delta_i: the delta between the current we want to charge and the
+ * current that is really going into the battery
+ * @condition_cnt: number of iterations needed before a new charger current
+ * is set
+ * @max_current: maximum charger current
+ * @wait_cnt: to avoid too fast current step down in case of charger
+ * voltage collapse, we insert this delay between step
+ * down
+ * @level: tells in how many steps the charging current has been
+ * increased
+ */
+struct abx500_charge_curr_maximization {
+ int original_iset;
+ int current_iset;
+ int test_delta_i;
+ int condition_cnt;
+ int max_current;
+ int wait_cnt;
+ u8 level;
+};
+
+enum maxim_ret {
+ MAXIM_RET_NOACTION,
+ MAXIM_RET_CHANGE,
+ MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct abx500_chargalg - abx500 Charging algorithm device information
+ * @dev: pointer to the structure device
+ * @charge_status: battery operating status
+ * @eoc_cnt: counter used to determine end-of-charge
+ * @rch_cnt: counter used to determine start of recharge
+ * @maintenance_chg: indicate if maintenance charge is active
+ * @t_hyst_norm: temperature hysteresis when the temperature has been
+ * over or under normal limits
+ * @t_hyst_lowhigh: temperature hysteresis when the temperature has been
+ * over or under the high or low limits
+ * @charge_state: current state of the charging algorithm
+ * @ccm: charging current maximization parameters
+ * @chg_info: information about connected charger types
+ * @batt_data: data of the battery
+ * @susp_status: current charger suspension status
+ * @pdata: pointer to the abx500_chargalg platform data
+ * @bat: pointer to the abx500_bm platform data
+ * @chargalg_psy: structure that holds the battery properties exposed by
+ * the charging algorithm
+ * @events: structure for information about events triggered
+ * @chargalg_wq: work queue for running the charging algorithm
+ * @chargalg_periodic_work: work to run the charging algorithm periodically
+ * @chargalg_wd_work: work to kick the charger watchdog periodically
+ * @chargalg_work: work to run the charging algorithm instantly
+ * @safety_timer: charging safety timer
+ * @maintenance_timer: maintenance charging timer
+ * @chargalg_kobject: structure of type kobject
+ */
+struct abx500_chargalg {
+ struct device *dev;
+ int charge_status;
+ int eoc_cnt;
+ int rch_cnt;
+ bool maintenance_chg;
+ int t_hyst_norm;
+ int t_hyst_lowhigh;
+ enum abx500_chargalg_states charge_state;
+ struct abx500_charge_curr_maximization ccm;
+ struct abx500_chargalg_charger_info chg_info;
+ struct abx500_chargalg_battery_data batt_data;
+ struct abx500_chargalg_suspension_status susp_status;
+ struct abx500_chargalg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply chargalg_psy;
+ struct ux500_charger *ac_chg;
+ struct ux500_charger *usb_chg;
+ struct abx500_chargalg_events events;
+ struct workqueue_struct *chargalg_wq;
+ struct delayed_work chargalg_periodic_work;
+ struct delayed_work chargalg_wd_work;
+ struct work_struct chargalg_work;
+ struct timer_list safety_timer;
+ struct timer_list maintenance_timer;
+ struct kobject chargalg_kobject;
+};
+
+/* Main battery properties */
+static enum power_supply_property abx500_chargalg_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+/**
+ * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @data: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static void abx500_chargalg_safety_timer_expired(unsigned long data)
+{
+ struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+ dev_err(di->dev, "Safety timer expired\n");
+ di->events.safety_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @data: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static void abx500_chargalg_maintenance_timer_expired(unsigned long data)
+{
+
+ struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+ dev_dbg(di->dev, "Maintenance timer expired\n");
+ di->events.maintenance_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_state_to() - Change charge state
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void abx500_chargalg_state_to(struct abx500_chargalg *di,
+ enum abx500_chargalg_states state)
+{
+ dev_dbg(di->dev,
+ "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+ di->charge_state == state ? "NO" : "YES",
+ di->charge_state,
+ states[di->charge_state],
+ state,
+ states[state]);
+
+ di->charge_state = state;
+}
+
+/**
+ * abx500_chargalg_check_charger_connection() - Check charger connection change
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
+{
+ if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+ di->susp_status.suspended_change) {
+ /*
+ * Charger state changed or suspension
+ * has changed since last update
+ */
+ if ((di->chg_info.conn_chg & AC_CHG) &&
+ !di->susp_status.ac_suspended) {
+ dev_dbg(di->dev, "Charging source is AC\n");
+ if (di->chg_info.charger_type != AC_CHG) {
+ di->chg_info.charger_type = AC_CHG;
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ } else if ((di->chg_info.conn_chg & USB_CHG) &&
+ !di->susp_status.usb_suspended) {
+ dev_dbg(di->dev, "Charging source is USB\n");
+ di->chg_info.charger_type = USB_CHG;
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else if (di->chg_info.conn_chg &&
+ (di->susp_status.ac_suspended ||
+ di->susp_status.usb_suspended)) {
+ dev_dbg(di->dev, "Charging is suspended\n");
+ di->chg_info.charger_type = NO_CHG;
+ abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+ } else {
+ dev_dbg(di->dev, "Charging source is OFF\n");
+ di->chg_info.charger_type = NO_CHG;
+ abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+ di->susp_status.suspended_change = false;
+ }
+ return di->chg_info.conn_chg;
+}
+
+/**
+ * abx500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
+{
+ unsigned long timer_expiration = 0;
+
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->main_safety_tmr_h * 3600 * HZ));
+ break;
+
+ case USB_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->usb_safety_tmr_h * 3600 * HZ));
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+
+ di->events.safety_timer_expired = false;
+ di->safety_timer.expires = timer_expiration;
+ if (!timer_pending(&di->safety_timer))
+ add_timer(&di->safety_timer);
+ else
+ mod_timer(&di->safety_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
+{
+ di->events.safety_timer_expired = false;
+ del_timer(&di->safety_timer);
+}
+
+/**
+ * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di: pointer to the abx500_chargalg structure
+ * @duration: duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
+ int duration)
+{
+ unsigned long timer_expiration;
+
+ /* Convert from hours to jiffies */
+ timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
+
+ di->events.maintenance_timer_expired = false;
+ di->maintenance_timer.expires = timer_expiration;
+ if (!timer_pending(&di->maintenance_timer))
+ add_timer(&di->maintenance_timer);
+ else
+ mod_timer(&di->maintenance_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
+{
+ di->events.maintenance_timer_expired = false;
+ del_timer(&di->maintenance_timer);
+}
+
+/**
+ * abx500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, else the ABB will reset the system
+ */
+static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
+{
+ /* Check if charger exists and kick watchdog if charging */
+ if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+ di->chg_info.online_chg & AC_CHG)
+ return di->ac_chg->ops.kick_wd(di->ac_chg);
+ else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+ di->chg_info.online_chg & USB_CHG)
+ return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+ return -ENXIO;
+}
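
The chargalg only forwards the watchdog kick; the charger driver behind di->ac_chg or di->usb_chg must provide the callback. A sketch of that wiring, with the ux500_charger field layout inferred from the calls in this file (the authoritative definition lives in ux500_chargalg.h, included above):

    /* Signature inferred from di->ac_chg->ops.kick_wd(di->ac_chg) */
    static int example_ac_kick_wd(struct ux500_charger *charger)
    {
            /* rearm the hardware charger watchdog here */
            return 0;
    }

    static struct ux500_charger example_ac_chg = {
            .max_out_volt = 4500,   /* mV, hypothetical limit */
            .max_out_curr = 1500,   /* mA, hypothetical limit */
            .ops = {
                    .kick_wd = example_ac_kick_wd,
            },
    };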
+
+/**
+ * abx500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di: pointer to the abx500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->ac_chg || !di->ac_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select maximum of what both the charger and the battery supports */
+ if (di->ac_chg->max_out_volt)
+ vset = min(vset, di->ac_chg->max_out_volt);
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+ di->chg_info.ac_vset = vset;
+
+ return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di: pointer to the abx500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->usb_chg || !di->usb_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select maximum of what both the charger and the battery supports */
+ if (di->usb_chg->max_out_volt)
+ vset = min(vset, di->usb_chg->max_out_volt);
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+ di->chg_info.usb_vset = vset;
+
+ return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_update_chg_curr() - Update charger current
+ * @di: pointer to the abx500_chargalg structure
+ * @iset: requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
+ int iset)
+{
+ /* Check if charger exists and update current if charging */
+ if (di->ac_chg && di->ac_chg->ops.update_curr &&
+ di->chg_info.charger_type & AC_CHG) {
+ /*
+ * Select maximum of what both the charger
+ * and the battery supports
+ */
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+
+ return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+ } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+ di->chg_info.charger_type & USB_CHG) {
+ /*
+ * Select maximum of what both the charger
+ * and the battery supports
+ */
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+
+ return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+ }
+
+ return -ENXIO;
+}
+
+/**
+ * abx500_chargalg_stop_charging() - Stop charging
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
+{
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_hold_charging() - Pauses charging
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
+{
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_start_charging() - Start the charger
+ * @di: pointer to the abx500_chargalg structure
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
+ int vset, int iset)
+{
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ dev_dbg(di->dev,
+ "AC parameters: Vset %d, Ich %d\n", vset, iset);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_ac_en(di, true, vset, iset);
+ break;
+
+ case USB_CHG:
+ dev_dbg(di->dev,
+ "USB parameters: Vset %d, Ich %d\n", vset, iset);
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, true, vset, iset);
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+}
+
+/**
+ * abx500_chargalg_check_temp() - Check battery temperature ranges
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
+{
+ if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
+ di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+ /* Temp OK! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = 0;
+ } else {
+ if (((di->batt_data.temp >= di->bat->temp_high) &&
+ (di->batt_data.temp <
+ (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+ ((di->batt_data.temp >
+ (di->bat->temp_under + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= di->bat->temp_low))) {
+ /* TEMP minor!!!!! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = true;
+ di->t_hyst_norm = di->bat->temp_hysteresis;
+ di->t_hyst_lowhigh = 0;
+ } else if (di->batt_data.temp <= di->bat->temp_under ||
+ di->batt_data.temp >= di->bat->temp_over) {
+ /* TEMP major!!!!! */
+ di->events.btemp_underover = true;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+ } else {
+ /* Within hysteresis */
+ dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+ "hyst_lowhigh %d, hyst normal %d\n",
+ di->batt_data.temp, di->t_hyst_lowhigh,
+ di->t_hyst_norm);
+ }
+ }
+}
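
A worked example of the hysteresis handling above, using hypothetical limits (temp_under = -5, temp_low = 5, temp_high = 45, temp_over = 65, temp_hysteresis = 3, all in degrees C):

    /*
     * temp = 25: inside the normal band, both event flags are cleared
     *            and both hysteresis values reset to 0.
     * temp = 50: btemp_lowhigh is set and t_hyst_norm becomes 3, so the
     *            temperature must fall below 45 - 3 = 42 before the
     *            normal band is re-entered.
     * temp = 70: btemp_underover is set and t_hyst_lowhigh becomes 3,
     *            so the temperature must fall below 65 - 3 = 62 before
     *            even the low/high band is re-entered.
     */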
+
+/**
+ * abx500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * Charger voltage is checked against maximum limit
+ */
+static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
+{
+ if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+ di->chg_info.usb_chg_ok = false;
+ else
+ di->chg_info.usb_chg_ok = true;
+
+ if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+ di->chg_info.ac_chg_ok = false;
+ else
+ di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * End-of-charge criteria is fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
+{
+ if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ di->charge_state == STATE_NORMAL &&
+ !di->maintenance_chg && (di->batt_data.volt >=
+ di->bat->bat_type[di->bat->batt_id].termination_vol ||
+ di->events.usb_cv_active || di->events.ac_cv_active) &&
+ di->batt_data.avg_curr <
+ di->bat->bat_type[di->bat->batt_id].termination_curr &&
+ di->batt_data.avg_curr > 0) {
+ if (++di->eoc_cnt >= EOC_COND_CNT) {
+ di->eoc_cnt = 0;
+ di->charge_status = POWER_SUPPLY_STATUS_FULL;
+ di->maintenance_chg = true;
+ dev_dbg(di->dev, "EOC reached!\n");
+ power_supply_changed(&di->chargalg_psy);
+ } else {
+ dev_dbg(di->dev,
+ " EOC limit reached for the %d"
+ " time, out of %d before EOC\n",
+ di->eoc_cnt,
+ EOC_COND_CNT);
+ }
+ } else {
+ di->eoc_cnt = 0;
+ }
+}
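
A worked example with hypothetical battery parameters (termination_vol = 4200 mV, termination_curr = 100 mA, EOC_COND_CNT = 10):

    /*
     * Once the charger reports CV mode (or VBAT has reached 4200 mV)
     * and the average battery current has decayed below 100 mA while
     * still positive, each periodic run of the algorithm increments
     * eoc_cnt. After 10 consecutive qualifying runs charge_status
     * becomes FULL and maintenance charging is armed; any run that
     * falls outside the window resets the counter to 0.
     */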
+
+static void init_maxim_chg_curr(struct abx500_chargalg *di)
+{
+ di->ccm.original_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.current_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
+ di->ccm.max_current = di->bat->maxi->chg_curr;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.level = 0;
+}
+
+/**
+ * abx500_chargalg_chg_curr_maxim - increases the charger current to
+ * compensate for the system load
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
+{
+ int delta_i;
+
+ if (!di->bat->maxi->ena_maxi)
+ return MAXIM_RET_NOACTION;
+
+ delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+ if (di->events.vbus_collapsed) {
+ dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+ di->ccm.wait_cnt);
+ if (di->ccm.wait_cnt == 0) {
+ dev_dbg(di->dev, "lowering current\n");
+ di->ccm.wait_cnt++;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.max_current =
+ di->ccm.current_iset - di->ccm.test_delta_i;
+ di->ccm.current_iset = di->ccm.max_current;
+ di->ccm.level--;
+ return MAXIM_RET_CHANGE;
+ } else {
+ dev_dbg(di->dev, "waiting\n");
+ /* Let's go in here twice before lowering curr again */
+ di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+ return MAXIM_RET_NOACTION;
+ }
+ }
+
+ di->ccm.wait_cnt = 0;
+
+ if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
+ dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+ " (limit %dmA) (current iset: %dmA)!\n",
+ di->batt_data.inst_curr, di->ccm.original_iset,
+ di->ccm.current_iset);
+
+ if (di->ccm.current_iset == di->ccm.original_iset)
+ return MAXIM_RET_NOACTION;
+
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset = di->ccm.original_iset;
+ di->ccm.level = 0;
+
+ return MAXIM_RET_IBAT_TOO_HIGH;
+ }
+
+ if (delta_i > di->ccm.test_delta_i &&
+ (di->ccm.current_iset + di->ccm.test_delta_i) <
+ di->ccm.max_current) {
+ if (di->ccm.condition_cnt-- == 0) {
+ /* Increase the iset with ccm.test_delta_i */
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset += di->ccm.test_delta_i;
+ di->ccm.level++;
+ dev_dbg(di->dev, " Maximization needed, increase"
+ " with %d mA to %dmA (Optimal ibat: %d)"
+ " Level %d\n",
+ di->ccm.test_delta_i,
+ di->ccm.current_iset,
+ di->ccm.original_iset,
+ di->ccm.level);
+ return MAXIM_RET_CHANGE;
+ } else {
+ return MAXIM_RET_NOACTION;
+ }
+ } else {
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ return MAXIM_RET_NOACTION;
+ }
+}
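
A worked trace with hypothetical maximization parameters (original_iset = 400 mA, test_delta_i = 100 mA, max_current = 900 mA, wait_cycles = 10):

    /*
     * With the system load keeping the measured battery current at
     * 250 mA, delta_i = 400 - 250 = 150 mA > 100 mA, so every 10
     * periodic cycles the charger current is raised by one step:
     * 400 -> 500 -> 600 ... until the next step would reach the
     * 900 mA ceiling, delta_i falls below the step size, or Ibat
     * exceeds 400 mA (MAXIM_RET_IBAT_TOO_HIGH, which drops back to
     * the original setting). A VBUS collapse instead steps the
     * current down once and lowers the ceiling accordingly.
     */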
+
+static void handle_maxim_chg_curr(struct abx500_chargalg *di)
+{
+ enum maxim_ret ret;
+ int result;
+
+ ret = abx500_chargalg_chg_curr_maxim(di);
+ switch (ret) {
+ case MAXIM_RET_CHANGE:
+ result = abx500_chargalg_update_chg_curr(di,
+ di->ccm.current_iset);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+ case MAXIM_RET_IBAT_TOO_HIGH:
+ result = abx500_chargalg_update_chg_curr(di,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+
+ case MAXIM_RET_NOACTION:
+ default:
+ /* Do nothing..*/
+ break;
+ }
+}
+
+static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct abx500_chargalg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_abx500_chargalg_device_info(psy);
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ /* Initialize chargers if not already done */
+ if (!di->ac_chg &&
+ ext->type == POWER_SUPPLY_TYPE_MAINS)
+ di->ac_chg = psy_to_ux500_charger(ext);
+ else if (!di->usb_chg &&
+ ext->type == POWER_SUPPLY_TYPE_USB)
+ di->usb_chg = psy_to_ux500_charger(ext);
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ /* Battery present */
+ if (ret.intval)
+ di->events.batt_rem = false;
+ /* Battery removed */
+ else
+ di->events.batt_rem = true;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~AC_CHG;
+ }
+ /* AC connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= AC_CHG;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~USB_CHG;
+ }
+ /* USB connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= USB_CHG;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~AC_CHG;
+ }
+ /* AC online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= AC_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~USB_CHG;
+ }
+ /* USB online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= USB_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.mainextchnotok = true;
+ di->events.main_thermal_prot = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.ac_wd_expired = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.main_thermal_prot = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.main_thermal_prot = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.main_ovv = true;
+ di->events.mainextchnotok = false;
+ di->events.main_thermal_prot = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.main_thermal_prot = false;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_TYPE_USB:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.usbchargernotok = true;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.usb_wd_expired = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.usb_thermal_prot = true;
+ di->events.usbchargernotok = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.vbus_ovv = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_volt = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.ac_cv_active = true;
+ else
+ di->events.ac_cv_active = false;
+
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.usb_cv_active = true;
+ else
+ di->events.usb_cv_active = false;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->events.batt_unknown = false;
+ else
+ di->events.batt_unknown = true;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ di->batt_data.temp = ret.intval / 10;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.inst_curr = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.avg_curr = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ if (ret.intval)
+ di->events.vbus_collapsed = true;
+ else
+ di->events.vbus_collapsed = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ di->batt_data.percent = ret.intval;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * abx500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the entry point for the external_power_changed callback
+ * of the power_supply structure.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void abx500_chargalg_external_power_changed(struct power_supply *psy)
+{
+ struct abx500_chargalg *di = to_abx500_chargalg_device_info(psy);
+
+ /*
+ * Trigger execution of the algorithm instantly and read
+ * all power_supply properties there instead
+ */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_algorithm() - Main function for the algorithm
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
+{
+ int charger_status;
+
+ /* Collect data from all power_supply class devices */
+ class_for_each_device(power_supply_class, NULL,
+ &di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
+
+ abx500_chargalg_end_of_charge(di);
+ abx500_chargalg_check_temp(di);
+ abx500_chargalg_check_charger_voltage(di);
+
+ charger_status = abx500_chargalg_check_charger_connection(di);
+ /*
+ * First check if we have a charger connected.
+ * Also we don't allow charging of unknown batteries if configured
+ * this way
+ */
+ if (!charger_status ||
+ (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+ if (di->charge_state != STATE_HANDHELD) {
+ di->events.safety_timer_expired = false;
+ abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ }
+
+ /* If suspended, we should not continue checking the flags */
+ else if (di->charge_state == STATE_SUSPENDED_INIT ||
+ di->charge_state == STATE_SUSPENDED) {
+ /* We don't do anything here, just don't continue */
+ }
+
+ /* Safety timer expiration */
+ else if (di->events.safety_timer_expired) {
+ if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+ abx500_chargalg_state_to(di,
+ STATE_SAFETY_TIMER_EXPIRED_INIT);
+ }
+ /*
+ * Check if any interrupts have occurred
+ * that will prevent us from charging
+ */
+
+ /* Battery removed */
+ else if (di->events.batt_rem) {
+ if (di->charge_state != STATE_BATT_REMOVED)
+ abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+ }
+ /* Main or USB charger not ok. */
+ else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+ /*
+ * If vbus_collapsed is set, we have to lower the charger
+ * current, which is done in the normal state below
+ */
+ if (di->charge_state != STATE_CHG_NOT_OK &&
+ !di->events.vbus_collapsed)
+ abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+ }
+ /* VBUS, Main or VBAT OVV. */
+ else if (di->events.vbus_ovv ||
+ di->events.main_ovv ||
+ di->events.batt_ovv ||
+ !di->chg_info.usb_chg_ok ||
+ !di->chg_info.ac_chg_ok) {
+ if (di->charge_state != STATE_OVV_PROTECT)
+ abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+ }
+ /* USB Thermal, stop charging */
+ else if (di->events.main_thermal_prot ||
+ di->events.usb_thermal_prot) {
+ if (di->charge_state != STATE_HW_TEMP_PROTECT)
+ abx500_chargalg_state_to(di,
+ STATE_HW_TEMP_PROTECT_INIT);
+ }
+ /* Battery temp over/under */
+ else if (di->events.btemp_underover) {
+ if (di->charge_state != STATE_TEMP_UNDEROVER)
+ abx500_chargalg_state_to(di,
+ STATE_TEMP_UNDEROVER_INIT);
+ }
+ /* Watchdog expired */
+ else if (di->events.ac_wd_expired ||
+ di->events.usb_wd_expired) {
+ if (di->charge_state != STATE_WD_EXPIRED)
+ abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+ }
+ /* Battery temp high/low */
+ else if (di->events.btemp_lowhigh) {
+ if (di->charge_state != STATE_TEMP_LOWHIGH)
+ abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+ }
+
+ dev_dbg(di->dev,
+ "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+ "State %s Active_chg %d Chg_status %d AC %d USB %d "
+ "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+ "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+ di->batt_data.volt,
+ di->batt_data.avg_curr,
+ di->batt_data.inst_curr,
+ di->batt_data.temp,
+ di->batt_data.percent,
+ di->maintenance_chg,
+ states[di->charge_state],
+ di->chg_info.charger_type,
+ di->charge_status,
+ di->chg_info.conn_chg & AC_CHG,
+ di->chg_info.conn_chg & USB_CHG,
+ di->chg_info.online_chg & AC_CHG,
+ di->chg_info.online_chg & USB_CHG,
+ di->events.ac_cv_active,
+ di->events.usb_cv_active,
+ di->chg_info.ac_curr,
+ di->chg_info.usb_curr,
+ di->chg_info.ac_vset,
+ di->chg_info.ac_iset,
+ di->chg_info.usb_vset,
+ di->chg_info.usb_iset);
+
+ switch (di->charge_state) {
+ case STATE_HANDHELD_INIT:
+ abx500_chargalg_stop_charging(di);
+ di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ abx500_chargalg_state_to(di, STATE_HANDHELD);
+ /* Intentional fallthrough */
+
+ case STATE_HANDHELD:
+ break;
+
+ case STATE_SUSPENDED_INIT:
+ if (di->susp_status.ac_suspended)
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ if (di->susp_status.usb_suspended)
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ abx500_chargalg_state_to(di, STATE_SUSPENDED);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_SUSPENDED:
+ /* CHARGING is suspended */
+ break;
+
+ case STATE_BATT_REMOVED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
+ /* Intentional fallthrough */
+
+ case STATE_BATT_REMOVED:
+ if (!di->events.batt_rem)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_HW_TEMP_PROTECT_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_HW_TEMP_PROTECT:
+ if (!di->events.main_thermal_prot &&
+ !di->events.usb_thermal_prot)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_OVV_PROTECT_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_OVV_PROTECT:
+ if (!di->events.vbus_ovv &&
+ !di->events.main_ovv &&
+ !di->events.batt_ovv &&
+ di->chg_info.usb_chg_ok &&
+ di->chg_info.ac_chg_ok)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_CHG_NOT_OK_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+ /* Intentional fallthrough */
+
+ case STATE_CHG_NOT_OK:
+ if (!di->events.mainextchnotok &&
+ !di->events.usbchargernotok)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_SAFETY_TIMER_EXPIRED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_SAFETY_TIMER_EXPIRED:
+ /* We exit this state when charger is removed */
+ break;
+
+ case STATE_NORMAL_INIT:
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_NORMAL);
+ abx500_chargalg_start_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ init_maxim_chg_curr(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->eoc_cnt = 0;
+ di->maintenance_chg = false;
+ power_supply_changed(&di->chargalg_psy);
+
+ break;
+
+ case STATE_NORMAL:
+ handle_maxim_chg_curr(di);
+ if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+ di->maintenance_chg) {
+ if (di->bat->no_maintenance)
+ abx500_chargalg_state_to(di,
+ STATE_WAIT_FOR_RECHARGE_INIT);
+ else
+ abx500_chargalg_state_to(di,
+ STATE_MAINTENANCE_A_INIT);
+ }
+ break;
+
+ /* This state will be used when the maintenance state is disabled */
+ case STATE_WAIT_FOR_RECHARGE_INIT:
+ abx500_chargalg_hold_charging(di);
+ abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+ di->rch_cnt = RCH_COND_CNT;
+ /* Intentional fallthrough */
+
+ case STATE_WAIT_FOR_RECHARGE:
+ if (di->batt_data.volt <=
+ di->bat->bat_type[di->bat->batt_id].recharge_vol) {
+ if (di->rch_cnt-- == 0)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else
+ di->rch_cnt = RCH_COND_CNT;
+ break;
+
+ case STATE_MAINTENANCE_A_INIT:
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_chg_timer_h);
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough*/
+
+ case STATE_MAINTENANCE_A:
+ if (di->events.maintenance_timer_expired) {
+ abx500_chargalg_stop_maintenance_timer(di);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+ }
+ break;
+
+ case STATE_MAINTENANCE_B_INIT:
+ abx500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_chg_timer_h);
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough*/
+
+ case STATE_MAINTENANCE_B:
+ if (di->events.maintenance_timer_expired) {
+ abx500_chargalg_stop_maintenance_timer(di);
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ break;
+
+ case STATE_TEMP_LOWHIGH_INIT:
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_cur_lvl);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_LOWHIGH:
+ if (!di->events.btemp_lowhigh)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_WD_EXPIRED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_WD_EXPIRED:
+ if (!di->events.ac_wd_expired &&
+ !di->events.usb_wd_expired)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_TEMP_UNDEROVER_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_UNDEROVER:
+ if (!di->events.btemp_underover)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+ }
+
+ /* Start charging directly if the new state is a charge state */
+ if (di->charge_state == STATE_NORMAL_INIT ||
+ di->charge_state == STATE_MAINTENANCE_A_INIT ||
+ di->charge_state == STATE_MAINTENANCE_B_INIT)
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void abx500_chargalg_periodic_work(struct work_struct *work)
+{
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_periodic_work.work);
+
+ abx500_chargalg_algorithm(di);
+
+ /*
+ * If a charger is connected then the battery has to be monitored
+ * frequently, else the work can be delayed.
+ */
+ if (di->chg_info.conn_chg)
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_charging * HZ);
+ else
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_not_charging * HZ);
+}
+
+/**
+ * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void abx500_chargalg_wd_work(struct work_struct *work)
+{
+ int ret;
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_wd_work.work);
+
+ dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
+
+ ret = abx500_chargalg_kick_watchdog(di);
+ if (ret < 0)
+ dev_err(di->dev, "failed to kick watchdog\n");
+
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * abx500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void abx500_chargalg_work(struct work_struct *work)
+{
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_work);
+
+ abx500_chargalg_algorithm(di);
+}
+
+/**
+ * abx500_chargalg_get_property() - get the chargalg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status: charging/discharging/full/unknown
+ * health: health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int abx500_chargalg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct abx500_chargalg *di;
+
+ di = to_abx500_chargalg_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = di->charge_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->events.batt_ovv) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else if (di->events.btemp_underover) {
+ if (di->batt_data.temp <= di->bat->temp_under)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else {
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * abx500_chargalg_sysfs_charger() - sysfs store operations
+ * @kobj: pointer to the struct kobject
+ * @attr: pointer to the struct attribute
+ * @buf: buffer that holds the parameter passed from userspace
+ * @length: length of the parameter passed
+ *
+ * Performs the operation requested by the parameter passed from user space.
+ * Returns the length of the buffer (the input taken from user space) on
+ * success, else an error code on failure.
+ */
+static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t length)
+{
+ struct abx500_chargalg *di = container_of(kobj,
+ struct abx500_chargalg, chargalg_kobject);
+ long int param;
+ int ac_usb;
+ int ret;
+ char entry = *attr->name;
+
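+ /* Only the "chargalg" attribute is exposed, so 'c' selects charge control */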
+ switch (entry) {
+ case 'c':
+ ret = strict_strtol(buf, 10, &param);
+ if (ret < 0)
+ return ret;
+
+ ac_usb = param;
+ switch (ac_usb) {
+ case 0:
+ /* Disable charging */
+ di->susp_status.ac_suspended = true;
+ di->susp_status.usb_suspended = true;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 1:
+ /* Enable AC Charging */
+ di->susp_status.ac_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 2:
+ /* Enable USB charging */
+ di->susp_status.usb_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ default:
+ dev_info(di->dev, "Invalid input\n"
+ "0: Disable AC/USB charging\n"
+ "1: Enable AC charging\n"
+ "2: Enable USB charging\n");
+ }
+ break;
+ }
+ return strlen(buf);
+}
+
+static struct attribute abx500_chargalg_en_charger = \
+{
+ .name = "chargalg",
+ .mode = S_IWUGO,
+};
+
+static struct attribute *abx500_chargalg_chg[] = {
+ &abx500_chargalg_en_charger,
+ NULL
+};
+
+const struct sysfs_ops abx500_chargalg_sysfs_ops = {
+ .store = abx500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type abx500_chargalg_ktype = {
+ .sysfs_ops = &abx500_chargalg_sysfs_ops,
+ .default_attrs = abx500_chargalg_chg,
+};
+
+/**
+ * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct abx500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
+{
+ kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * abx500_chargalg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct abx500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
+{
+ int ret = 0;
+
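+ /* No parent kobject is given, so the entry appears at the top of sysfs */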
+ ret = kobject_init_and_add(&di->chargalg_kobject,
+ &abx500_chargalg_ktype,
+ NULL, "abx500_chargalg");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int abx500_chargalg_resume(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* Kick charger watchdog if charging (any charger online) */
+ if (di->chg_info.online_chg)
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+ /*
+ * Run the charging algorithm directly to be sure we don't
+ * do it too seldom
+ */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ return 0;
+}
+
+static int abx500_chargalg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ if (di->chg_info.online_chg)
+ cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+ cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+ return 0;
+}
+#else
+#define abx500_chargalg_suspend NULL
+#define abx500_chargalg_resume NULL
+#endif
+
+static int __devexit abx500_chargalg_remove(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* sysfs interface to enable/disable charging from user space */
+ abx500_chargalg_sysfs_exit(di);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->chargalg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->chargalg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
+{
+ struct abx500_bm_plat_data *plat_data;
+ int ret = 0;
+
+ struct abx500_chargalg *di =
+ kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get device struct */
+ di->dev = &pdev->dev;
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->chargalg;
+ di->bat = plat_data->battery;
+
+ /* chargalg supply */
+ di->chargalg_psy.name = "abx500_chargalg";
+ di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->chargalg_psy.properties = abx500_chargalg_props;
+ di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
+ di->chargalg_psy.get_property = abx500_chargalg_get_property;
+ di->chargalg_psy.supplied_to = di->pdata->supplied_to;
+ di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.external_power_changed =
+ abx500_chargalg_external_power_changed;
+
+ /* Initialize safety timer */
+ init_timer(&di->safety_timer);
+ di->safety_timer.function = abx500_chargalg_safety_timer_expired;
+ di->safety_timer.data = (unsigned long) di;
+
+ /* Initialize maintenance timer */
+ init_timer(&di->maintenance_timer);
+ di->maintenance_timer.function =
+ abx500_chargalg_maintenance_timer_expired;
+ di->maintenance_timer.data = (unsigned long) di;
+
+ /* Create a work queue for the chargalg */
+ di->chargalg_wq =
+ create_singlethread_workqueue("abx500_chargalg_wq");
+ if (di->chargalg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for chargalg */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+ abx500_chargalg_periodic_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+ abx500_chargalg_wd_work);
+
+ /* Init work for running the chargalg instantly */
+ INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
+
+ /* To detect charger at startup */
+ di->chg_info.prev_conn_chg = -1;
+
+ /* Register chargalg power supply class */
+ ret = power_supply_register(di->dev, &di->chargalg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register chargalg psy\n");
+ goto free_chargalg_wq;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ret = abx500_chargalg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_psy;
+ }
+
+ /* Run the charging algorithm */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_psy:
+ power_supply_unregister(&di->chargalg_psy);
+free_chargalg_wq:
+ destroy_workqueue(di->chargalg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver abx500_chargalg_driver = {
+ .probe = abx500_chargalg_probe,
+ .remove = __devexit_p(abx500_chargalg_remove),
+ .suspend = abx500_chargalg_suspend,
+ .resume = abx500_chargalg_resume,
+ .driver = {
+ .name = "abx500-chargalg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init abx500_chargalg_init(void)
+{
+ return platform_driver_register(&abx500_chargalg_driver);
+}
+
+static void __exit abx500_chargalg_exit(void)
+{
+ platform_driver_unregister(&abx500_chargalg_driver);
+}
+
+module_init(abx500_chargalg_init);
+module_exit(abx500_chargalg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:abx500-chargalg");
+MODULE_DESCRIPTION("abx500 battery charging algorithm");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9713b1b860c..8d78f7424f9 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -284,6 +284,13 @@ config REGULATOR_AD5398
This driver supports AD5398 and AD5821 current regulator chips.
If building into module, its name is ad5398.ko.
+config REGULATOR_AB5500
+ bool "ST-Ericsson AB5500 Power Regulators"
+ depends on AB5500_CORE
+ help
+ This driver supports the regulators found on the ST-Ericsson mixed
+ signal AB5500 PMIC
+
config REGULATOR_AB8500
bool "ST-Ericsson AB8500 Power Regulators"
depends on AB8500_CORE
@@ -291,13 +298,41 @@ config REGULATOR_AB8500
This driver supports the regulators found on the ST-Ericsson mixed
signal AB8500 PMIC
+config REGULATOR_AB8500_EXT
+ bool "ST-Ericsson AB8500 External Regulators"
+ depends on REGULATOR_AB8500
+ default y if REGULATOR_AB8500
+ help
+ This driver supports the external regulator controls found on the
+ ST-Ericsson mixed signal AB8500 PMIC
+
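+# Hidden symbol, selected by the SoC-specific PRCMU regulator options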
+config REGULATOR_DBX500_PRCMU
+ bool
+
+config REGULATOR_DB5500_PRCMU
+ bool "ST-Ericsson DB5500 Voltage Domain Regulators"
+ depends on MFD_DB5500_PRCMU
+ select REGULATOR_DBX500_PRCMU
+ help
+ This driver supports the voltage domain regulators controlled by the
+ DB5500 PRCMU
+
config REGULATOR_DB8500_PRCMU
bool "ST-Ericsson DB8500 Voltage Domain Regulators"
depends on MFD_DB8500_PRCMU
+ select REGULATOR_DBX500_PRCMU
help
This driver supports the voltage domain regulators controlled by the
DB8500 PRCMU
+config REGULATOR_AB8500_DEBUG
+ bool "AB8500 regulator debug"
+ depends on REGULATOR_AB8500
+ help
+ Say Y here to add debug functionality for ST-Ericsson
+ ab8500 regulators. This is a module that exposes a
+ number of settings and debug output in debugfs.
+
config REGULATOR_TPS6586X
tristate "TI TPS6586X Power regulators"
depends on MFD_TPS6586X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 93a6318f532..e162da56b7a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,9 +42,14 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
+obj-$(CONFIG_REGULATOR_AB5500) += ab5500.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
+obj-$(CONFIG_REGULATOR_AB8500_EXT) += ab8500-ext.o
+obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
+obj-$(CONFIG_REGULATOR_DB5500_PRCMU) += db5500-prcmu.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
+obj-$(CONFIG_REGULATOR_AB8500_DEBUG) += ab8500-debug.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab5500.c b/drivers/regulator/ab5500.c
new file mode 100644
index 00000000000..8c2c5ec9974
--- /dev/null
+++ b/drivers/regulator/ab5500.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Based on ab3100.c.
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/regulator/ab5500.h>
+
+#define AB5500_LDO_VDIGMIC_ST 0x50
+
+#define AB5500_LDO_G_ST 0x78
+#define AB5500_LDO_G_PWR1 0x79
+#define AB5500_LDO_G_PWR0 0x7a
+
+#define AB5500_LDO_H_ST 0x7b
+#define AB5500_LDO_H_PWR1 0x7c
+#define AB5500_LDO_H_PWR0 0x7d
+
+#define AB5500_LDO_K_ST 0x7e
+#define AB5500_LDO_K_PWR1 0x7f
+#define AB5500_LDO_K_PWR0 0x80
+
+#define AB5500_LDO_L_ST 0x81
+#define AB5500_LDO_L_PWR1 0x82
+#define AB5500_LDO_L_PWR0 0x83
+
+/* In SIM bank */
+#define AB5500_SIM_SUP 0x14
+
+#define AB5500_MBIAS2 0x01
+
+#define AB5500_LDO_MODE_MASK (0x3 << 4)
+#define AB5500_LDO_MODE_FULLPOWER (0x3 << 4)
+#define AB5500_LDO_MODE_PWRCTRL (0x2 << 4)
+#define AB5500_LDO_MODE_LOWPOWER (0x1 << 4)
+#define AB5500_LDO_MODE_OFF (0x0 << 4)
+#define AB5500_LDO_VOLT_MASK 0x07
+
+#define AB5500_MBIAS2_ENABLE (0x1 << 1)
+#define AB5500_MBIAS2_VOLT_MASK (0x1 << 2)
+#define AB5500_MBIAS2_MODE_MASK (0x1 << 1)
+
+struct ab5500_regulator {
+ struct regulator_desc desc;
+ const int *voltages;
+ int num_holes;
+ bool pwrctrl;
+ bool enabled;
+ int enable_time;
+ u8 bank;
+ u8 reg;
+ u8 mode;
+ u8 update_mask;
+ u8 update_val_idle;
+ u8 update_val_normal;
+ u8 voltage_mask;
+};
+
+struct ab5500_regulators {
+ struct device *dev;
+ struct ab5500_regulator *regulator[AB5500_NUM_REGULATORS];
+ struct regulator_dev *rdev[AB5500_NUM_REGULATORS];
+};
+
+static int ab5500_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ return r->enable_time; /* microseconds */
+}
+
+static int ab5500_regulator_enable(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ int ret;
+
+ ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg,
+ r->update_mask, r->mode);
+ if (ret < 0)
+ return ret;
+
+ r->enabled = true;
+
+ return 0;
+}
+
+static int ab5500_regulator_disable(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval = r->pwrctrl ? AB5500_LDO_MODE_PWRCTRL : AB5500_LDO_MODE_OFF;
+ int ret;
+
+ ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg,
+ r->update_mask, regval);
+ if (ret < 0)
+ return ret;
+
+ r->enabled = false;
+
+ return 0;
+}
+
+static unsigned int ab5500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ if (r->mode == r->update_val_idle)
+ return REGULATOR_MODE_IDLE;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int ab5500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ r->mode = r->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ r->mode = r->update_val_idle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
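+ /* Re-apply the new mode immediately if the regulator is enabled */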
+ if (r->enabled)
+ return ab5500_regulator_enable(rdev);
+
+ return 0;
+}
+
+static int ab5500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int err;
+
+ err = abx500_get_register_interruptible(ab5500->dev,
+ r->bank, r->reg, &regval);
+ if (err) {
+ dev_err(rdev_get_dev(rdev), "unable to get register 0x%x\n",
+ r->reg);
+ return err;
+ }
+
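+ /* Both OFF and hardware power-control count as disabled from software */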
+ switch (regval & r->update_mask) {
+ case AB5500_LDO_MODE_PWRCTRL:
+ case AB5500_LDO_MODE_OFF:
+ r->enabled = false;
+ break;
+ default:
+ r->enabled = true;
+ break;
+ }
+
+ return r->enabled;
+}
+
+static int
+ab5500_regulator_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ unsigned n_voltages = r->desc.n_voltages;
+ int selindex;
+ int i;
+
+ for (i = 0, selindex = 0; selindex < n_voltages; i++) {
+ int voltage = r->voltages[i];
+
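+ /* Skip holes (zero entries) in the voltage table */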
+ if (!voltage)
+ continue;
+
+ if (selindex == selector)
+ return voltage;
+
+ selindex++;
+ }
+
+ return -EINVAL;
+}
+
+static int ab5500_regulator_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ return r->voltages[0];
+}
+
+static int ab5500_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int ret;
+
+ ret = abx500_get_register_interruptible(ab5500->dev,
+ r->bank, r->reg, &regval);
+ if (ret) {
+ dev_warn(rdev_get_dev(rdev),
+ "failed to get regulator value in register "
+ "%02x\n", r->reg);
+ return ret;
+ }
+
+ regval &= r->voltage_mask;
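+ /* Reject selectors beyond the table or pointing at unused (hole) entries */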
+ if (regval >= r->desc.n_voltages + r->num_holes)
+ return -EINVAL;
+
+ if (!r->voltages[regval])
+ return -EINVAL;
+
+ return r->voltages[regval];
+}
+
+static int ab5500_get_best_voltage_index(struct ab5500_regulator *r,
+ int min_uV, int max_uV)
+{
+ unsigned n_voltages = r->desc.n_voltages;
+ int bestmatch = INT_MAX;
+ int bestindex = -EINVAL;
+ int selindex;
+ int i;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The switchable voltages are not
+ * in strict falling order so we need to check them
+ * all for the best match.
+ */
+ for (i = 0, selindex = 0; selindex < n_voltages; i++) {
+ int voltage = r->voltages[i];
+
+ if (!voltage)
+ continue;
+
+ if (voltage <= max_uV &&
+ voltage >= min_uV &&
+ voltage < bestmatch) {
+ bestmatch = voltage;
+ bestindex = i;
+ }
+
+ selindex++;
+ }
+
+ return bestindex;
+}
+
+static int ab5500_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ int bestindex;
+
+ bestindex = ab5500_get_best_voltage_index(r, min_uV, max_uV);
+ if (bestindex < 0) {
+ dev_warn(rdev_get_dev(rdev),
+ "requested %d<=x<=%d uV, out of range!\n",
+ min_uV, max_uV);
+ return bestindex;
+ }
+
+ *selector = bestindex;
+
+ return abx500_mask_and_set_register_interruptible(ab5500->dev,
+ r->bank, r->reg, r->voltage_mask, bestindex);
+
+}
+
+static struct regulator_ops ab5500_regulator_variable_ops = {
+ .enable = ab5500_regulator_enable,
+ .disable = ab5500_regulator_disable,
+ .is_enabled = ab5500_regulator_is_enabled,
+ .enable_time = ab5500_regulator_enable_time,
+ .get_voltage = ab5500_regulator_get_voltage,
+ .set_voltage = ab5500_regulator_set_voltage,
+ .list_voltage = ab5500_regulator_list_voltage,
+ .set_mode = ab5500_regulator_set_mode,
+ .get_mode = ab5500_regulator_get_mode,
+};
+
+static struct regulator_ops ab5500_regulator_fixed_ops = {
+ .enable = ab5500_regulator_enable,
+ .disable = ab5500_regulator_disable,
+ .is_enabled = ab5500_regulator_is_enabled,
+ .enable_time = ab5500_regulator_enable_time,
+ .get_voltage = ab5500_regulator_fixed_get_voltage,
+ .list_voltage = ab5500_regulator_list_voltage,
+ .set_mode = ab5500_regulator_set_mode,
+ .get_mode = ab5500_regulator_get_mode,
+};
+
+static const int ab5500_ldo_lg_voltages[] = {
+ [0x00] = 1200000,
+ [0x01] = 0, /* not used */
+ [0x02] = 1500000,
+ [0x03] = 1800000,
+ [0x04] = 0, /* not used */
+ [0x05] = 2500000,
+ [0x06] = 2730000,
+ [0x07] = 2910000,
+};
+
+static const int ab5500_ldo_kh_voltages[] = {
+ [0x00] = 1200000,
+ [0x01] = 1500000,
+ [0x02] = 1800000,
+ [0x03] = 2100000,
+ [0x04] = 2500000,
+ [0x05] = 2750000,
+ [0x06] = 2790000,
+ [0x07] = 2910000,
+};
+
+static const int ab5500_ldo_vdigmic_voltages[] = {
+ [0x00] = 2100000,
+};
+
+static const int ab5500_ldo_sim_voltages[] = {
+ [0x00] = 1875000,
+ [0x01] = 2800000,
+ [0x02] = 2900000,
+};
+
+static const int ab5500_bias2_voltages[] = {
+ [0x00] = 2000000,
+ [0x01] = 2200000,
+};
+
+static struct ab5500_regulator ab5500_regulators[] = {
+ [AB5500_LDO_L] = {
+ .desc = {
+ .name = "LDO_L",
+ .id = AB5500_LDO_L,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) -
+ 2,
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_L_ST,
+ .voltages = ab5500_ldo_lg_voltages,
+ .num_holes = 2, /* 2 register values unused */
+ .enable_time = 400,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_G] = {
+ .desc = {
+ .name = "LDO_G",
+ .id = AB5500_LDO_G,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) -
+ 2,
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_G_ST,
+ .voltages = ab5500_ldo_lg_voltages,
+ .num_holes = 2, /* 2 register values unused */
+ .enable_time = 400,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_K] = {
+ .desc = {
+ .name = "LDO_K",
+ .id = AB5500_LDO_K,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_K_ST,
+ .voltages = ab5500_ldo_kh_voltages,
+ .enable_time = 400,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_H] = {
+ .desc = {
+ .name = "LDO_H",
+ .id = AB5500_LDO_H,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_H_ST,
+ .voltages = ab5500_ldo_kh_voltages,
+ .enable_time = 400,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_VDIGMIC] = {
+ .desc = {
+ .name = "LDO_VDIGMIC",
+ .id = AB5500_LDO_VDIGMIC,
+ .ops = &ab5500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages =
+ ARRAY_SIZE(ab5500_ldo_vdigmic_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_VDIGMIC_ST,
+ .voltages = ab5500_ldo_vdigmic_voltages,
+ .enable_time = 450,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_SIM] = {
+ .desc = {
+ .name = "LDO_SIM",
+ .id = AB5500_LDO_SIM,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_sim_voltages),
+ },
+ .bank = AB5500_BANK_SIM_USBSIM,
+ .reg = AB5500_SIM_SUP,
+ .voltages = ab5500_ldo_sim_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_BIAS2] = {
+ .desc = {
+ .name = "BIAS2",
+ .id = AB5500_BIAS2,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_bias2_voltages),
+ },
+ .bank = AB5500_BANK_AUDIO_HEADSETUSB,
+ .reg = AB5500_MBIAS2,
+ .voltages = ab5500_bias2_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_MBIAS2_ENABLE,
+ .update_mask = AB5500_MBIAS2_MODE_MASK,
+ .update_val_normal = AB5500_MBIAS2_ENABLE,
+ .update_val_idle = AB5500_MBIAS2_ENABLE,
+ .voltage_mask = AB5500_MBIAS2_VOLT_MASK,
+ },
+};
+
+static int __devinit ab5500_regulator_probe(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *ppdata = pdev->dev.parent->platform_data;
+ struct ab5500_regulator_platform_data *pdata = ppdata->regulator;
+ struct ab5500_regulators *ab5500;
+ int err = 0;
+ int i;
+
+ if (!pdata || !pdata->regulator)
+ return -EINVAL;
+
+ ab5500 = kzalloc(sizeof(*ab5500), GFP_KERNEL);
+ if (!ab5500)
+ return -ENOMEM;
+
+ ab5500->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, ab5500);
+
+ for (i = 0; i < AB5500_NUM_REGULATORS; i++) {
+ struct ab5500_regulator *regulator = &ab5500_regulators[i];
+ struct regulator_dev *rdev;
+
+ ab5500->regulator[i] = regulator;
+
+ rdev = regulator_register(&regulator->desc, &pdev->dev,
+ &pdata->regulator[i], ab5500);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "failed to register regulator %s err %d\n",
+ regulator->desc.name, err);
+ goto err_unregister;
+ }
+
+ ab5500->rdev[i] = rdev;
+ }
+
+ return 0;
+
+err_unregister:
+ /* remove the already registered regulators */
+ while (--i >= 0)
+ regulator_unregister(ab5500->rdev[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ab5500);
+
+ return err;
+}
+
+static int __devexit ab5500_regulators_remove(struct platform_device *pdev)
+{
+ struct ab5500_regulators *ab5500 = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < AB5500_NUM_REGULATORS; i++)
+ regulator_unregister(ab5500->rdev[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ab5500);
+
+ return 0;
+}
+
+static struct platform_driver ab5500_regulator_driver = {
+ .driver = {
+ .name = "ab5500-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_regulator_probe,
+ .remove = __devexit_p(ab5500_regulators_remove),
+};
+
+static __init int ab5500_regulator_init(void)
+{
+ return platform_driver_register(&ab5500_regulator_driver);
+}
+
+static __exit void ab5500_regulator_exit(void)
+{
+ platform_driver_unregister(&ab5500_regulator_driver);
+}
+
+subsys_initcall(ab5500_regulator_init);
+module_exit(ab5500_regulator_exit);
+
+MODULE_DESCRIPTION("AB5500 Regulator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ab5500-regulator");
diff --git a/drivers/regulator/ab8500-debug.c b/drivers/regulator/ab8500-debug.c
new file mode 100644
index 00000000000..da182641b2c
--- /dev/null
+++ b/drivers/regulator/ab8500-debug.c
@@ -0,0 +1,1853 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson.
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/regulator/ab8500-debug.h>
+#include <linux/io.h>
+#include <mach/db8500-regs.h> /* U8500_BACKUPRAM1_BASE */
+#include <mach/hardware.h>
+
+/* board profile address - to determine if suspend-force is default */
+#define BOOT_INFO_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0xffc)
+#define BOARD_PROFILE_BACKUPRAM1 (0x3)
+
+/* board profile option */
+#define OPTION_BOARD_VERSION_V5X 50
+
+/* for error prints */
+struct device *dev;
+struct platform_device *pdev;
+
+/* setting for suspend force (disabled by default) */
+static bool setting_suspend_force;
+
+/*
+ * regulator states
+ */
+enum ab8500_regulator_state_id {
+ AB8500_REGULATOR_STATE_INIT,
+ AB8500_REGULATOR_STATE_SUSPEND,
+ AB8500_REGULATOR_STATE_SUSPEND_CORE,
+ AB8500_REGULATOR_STATE_RESUME_CORE,
+ AB8500_REGULATOR_STATE_RESUME,
+ AB8500_REGULATOR_STATE_CURRENT,
+ NUM_REGULATOR_STATE
+};
+
+static const char *regulator_state_name[NUM_REGULATOR_STATE] = {
+ [AB8500_REGULATOR_STATE_INIT] = "init",
+ [AB8500_REGULATOR_STATE_SUSPEND] = "suspend",
+ [AB8500_REGULATOR_STATE_SUSPEND_CORE] = "suspend-core",
+ [AB8500_REGULATOR_STATE_RESUME_CORE] = "resume-core",
+ [AB8500_REGULATOR_STATE_RESUME] = "resume",
+ [AB8500_REGULATOR_STATE_CURRENT] = "current",
+};
+
+/*
+ * regulator register definitions
+ */
+enum ab8500_register_id {
+ AB8500_REGU_NOUSE, /* if not defined */
+ AB8500_REGU_REQUEST_CTRL1,
+ AB8500_REGU_REQUEST_CTRL2,
+ AB8500_REGU_REQUEST_CTRL3,
+ AB8500_REGU_REQUEST_CTRL4,
+ AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ AB8500_REGU_HW_HP_REQ1_VALID1,
+ AB8500_REGU_HW_HP_REQ1_VALID2,
+ AB8500_REGU_HW_HP_REQ2_VALID1,
+ AB8500_REGU_HW_HP_REQ2_VALID2,
+ AB8500_REGU_SW_HP_REQ_VALID1,
+ AB8500_REGU_SW_HP_REQ_VALID2,
+ AB8500_REGU_SYSCLK_REQ1_VALID,
+ AB8500_REGU_SYSCLK_REQ2_VALID,
+ AB8500_REGU_MISC1,
+ AB8500_REGU_OTG_SUPPLY_CTRL,
+ AB8500_REGU_VUSB_CTRL,
+ AB8500_REGU_VAUDIO_SUPPLY,
+ AB8500_REGU_CTRL1_VAMIC,
+ AB8500_REGU_ARM_REGU1,
+ AB8500_REGU_ARM_REGU2,
+ AB8500_REGU_VAPE_REGU,
+ AB8500_REGU_VSMPS1_REGU,
+ AB8500_REGU_VSMPS2_REGU,
+ AB8500_REGU_VSMPS3_REGU,
+ AB8500_REGU_VPLL_VANA_REGU,
+ AB8500_REGU_VREF_DDR,
+ AB8500_REGU_EXT_SUPPLY_REGU,
+ AB8500_REGU_VAUX12_REGU,
+ AB8500_REGU_VRF1_VAUX3_REGU,
+ AB8500_REGU_VARM_SEL1,
+ AB8500_REGU_VARM_SEL2,
+ AB8500_REGU_VARM_SEL3,
+ AB8500_REGU_VAPE_SEL1,
+ AB8500_REGU_VAPE_SEL2,
+ AB8500_REGU_VAPE_SEL3,
+ AB8500_REGU_VBB_SEL1,
+ AB8500_REGU_VBB_SEL2,
+ AB8500_REGU_VSMPS1_SEL1,
+ AB8500_REGU_VSMPS1_SEL2,
+ AB8500_REGU_VSMPS1_SEL3,
+ AB8500_REGU_VSMPS2_SEL1,
+ AB8500_REGU_VSMPS2_SEL2,
+ AB8500_REGU_VSMPS2_SEL3,
+ AB8500_REGU_VSMPS3_SEL1,
+ AB8500_REGU_VSMPS3_SEL2,
+ AB8500_REGU_VSMPS3_SEL3,
+ AB8500_REGU_VAUX1_SEL,
+ AB8500_REGU_VAUX2_SEL,
+ AB8500_REGU_VRF1_VAUX3_SEL,
+ AB8500_REGU_CTRL_EXT_SUP,
+ AB8500_REGU_VMOD_REGU,
+ AB8500_REGU_VMOD_SEL1,
+ AB8500_REGU_VMOD_SEL2,
+ AB8500_REGU_CTRL_DISCH,
+ AB8500_REGU_CTRL_DISCH2,
+ AB8500_OTHER_SYSCLK_CTRL, /* Other */
+ AB8500_OTHER_VSIM_SYSCLK_CTRL, /* Other */
+ AB8500_OTHER_SYSULPCLK_CTRL1, /* Other */
+ AB8500_OTHER_TVOUT_CTRL, /* Other */
+ NUM_AB8500_REGISTER
+};
+
+struct ab8500_register {
+ const char *name;
+ u8 bank;
+ u8 addr;
+};
+
+static struct ab8500_register
+ ab8500_register[NUM_AB8500_REGISTER] = {
+ [AB8500_REGU_REQUEST_CTRL1] = {
+ .name = "ReguRequestCtrl1",
+ .bank = 0x03,
+ .addr = 0x03,
+ },
+ [AB8500_REGU_REQUEST_CTRL2] = {
+ .name = "ReguRequestCtrl2",
+ .bank = 0x03,
+ .addr = 0x04,
+ },
+ [AB8500_REGU_REQUEST_CTRL3] = {
+ .name = "ReguRequestCtrl3",
+ .bank = 0x03,
+ .addr = 0x05,
+ },
+ [AB8500_REGU_REQUEST_CTRL4] = {
+ .name = "ReguRequestCtrl4",
+ .bank = 0x03,
+ .addr = 0x06,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_HP_VALID1] = {
+ .name = "ReguSysClkReq1HPValid",
+ .bank = 0x03,
+ .addr = 0x07,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_HP_VALID2] = {
+ .name = "ReguSysClkReq1HPValid2",
+ .bank = 0x03,
+ .addr = 0x08,
+ },
+ [AB8500_REGU_HW_HP_REQ1_VALID1] = {
+ .name = "ReguHwHPReq1Valid1",
+ .bank = 0x03,
+ .addr = 0x09,
+ },
+ [AB8500_REGU_HW_HP_REQ1_VALID2] = {
+ .name = "ReguHwHPReq1Valid2",
+ .bank = 0x03,
+ .addr = 0x0a,
+ },
+ [AB8500_REGU_HW_HP_REQ2_VALID1] = {
+ .name = "ReguHwHPReq2Valid1",
+ .bank = 0x03,
+ .addr = 0x0b,
+ },
+ [AB8500_REGU_HW_HP_REQ2_VALID2] = {
+ .name = "ReguHwHPReq2Valid2",
+ .bank = 0x03,
+ .addr = 0x0c,
+ },
+ [AB8500_REGU_SW_HP_REQ_VALID1] = {
+ .name = "ReguSwHPReqValid1",
+ .bank = 0x03,
+ .addr = 0x0d,
+ },
+ [AB8500_REGU_SW_HP_REQ_VALID2] = {
+ .name = "ReguSwHPReqValid2",
+ .bank = 0x03,
+ .addr = 0x0e,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_VALID] = {
+ .name = "ReguSysClkReqValid1",
+ .bank = 0x03,
+ .addr = 0x0f,
+ },
+ [AB8500_REGU_SYSCLK_REQ2_VALID] = {
+ .name = "ReguSysClkReqValid2",
+ .bank = 0x03,
+ .addr = 0x10,
+ },
+ [AB8500_REGU_MISC1] = {
+ .name = "ReguMisc1",
+ .bank = 0x03,
+ .addr = 0x80,
+ },
+ [AB8500_REGU_OTG_SUPPLY_CTRL] = {
+ .name = "OTGSupplyCtrl",
+ .bank = 0x03,
+ .addr = 0x81,
+ },
+ [AB8500_REGU_VUSB_CTRL] = {
+ .name = "VusbCtrl",
+ .bank = 0x03,
+ .addr = 0x82,
+ },
+ [AB8500_REGU_VAUDIO_SUPPLY] = {
+ .name = "VaudioSupply",
+ .bank = 0x03,
+ .addr = 0x83,
+ },
+ [AB8500_REGU_CTRL1_VAMIC] = {
+ .name = "ReguCtrl1VAmic",
+ .bank = 0x03,
+ .addr = 0x84,
+ },
+ [AB8500_REGU_ARM_REGU1] = {
+ .name = "ArmRegu1",
+ .bank = 0x04,
+ .addr = 0x00,
+ },
+ [AB8500_REGU_ARM_REGU2] = {
+ .name = "ArmRegu2",
+ .bank = 0x04,
+ .addr = 0x01,
+ },
+ [AB8500_REGU_VAPE_REGU] = {
+ .name = "VapeRegu",
+ .bank = 0x04,
+ .addr = 0x02,
+ },
+ [AB8500_REGU_VSMPS1_REGU] = {
+ .name = "Vsmps1Regu",
+ .bank = 0x04,
+ .addr = 0x03,
+ },
+ [AB8500_REGU_VSMPS2_REGU] = {
+ .name = "Vsmps2Regu",
+ .bank = 0x04,
+ .addr = 0x04,
+ },
+ [AB8500_REGU_VSMPS3_REGU] = {
+ .name = "Vsmps3Regu",
+ .bank = 0x04,
+ .addr = 0x05,
+ },
+ [AB8500_REGU_VPLL_VANA_REGU] = {
+ .name = "VpllVanaRegu",
+ .bank = 0x04,
+ .addr = 0x06,
+ },
+ [AB8500_REGU_VREF_DDR] = {
+ .name = "VrefDDR",
+ .bank = 0x04,
+ .addr = 0x07,
+ },
+ [AB8500_REGU_EXT_SUPPLY_REGU] = {
+ .name = "ExtSupplyRegu",
+ .bank = 0x04,
+ .addr = 0x08,
+ },
+ [AB8500_REGU_VAUX12_REGU] = {
+ .name = "Vaux12Regu",
+ .bank = 0x04,
+ .addr = 0x09,
+ },
+ [AB8500_REGU_VRF1_VAUX3_REGU] = {
+ .name = "VRF1Vaux3Regu",
+ .bank = 0x04,
+ .addr = 0x0a,
+ },
+ [AB8500_REGU_VARM_SEL1] = {
+ .name = "VarmSel1",
+ .bank = 0x04,
+ .addr = 0x0b,
+ },
+ [AB8500_REGU_VARM_SEL2] = {
+ .name = "VarmSel2",
+ .bank = 0x04,
+ .addr = 0x0c,
+ },
+ [AB8500_REGU_VARM_SEL3] = {
+ .name = "VarmSel3",
+ .bank = 0x04,
+ .addr = 0x0d,
+ },
+ [AB8500_REGU_VAPE_SEL1] = {
+ .name = "VapeSel1",
+ .bank = 0x04,
+ .addr = 0x0e,
+ },
+ [AB8500_REGU_VAPE_SEL2] = {
+ .name = "VapeSel2",
+ .bank = 0x04,
+ .addr = 0x0f,
+ },
+ [AB8500_REGU_VAPE_SEL3] = {
+ .name = "VapeSel3",
+ .bank = 0x04,
+ .addr = 0x10,
+ },
+ [AB8500_REGU_VBB_SEL1] = {
+ .name = "VBBSel1",
+ .bank = 0x04,
+ .addr = 0x11,
+ },
+ [AB8500_REGU_VBB_SEL2] = {
+ .name = "VBBSel2",
+ .bank = 0x04,
+ .addr = 0x12,
+ },
+ [AB8500_REGU_VSMPS1_SEL1] = {
+ .name = "Vsmps1Sel1",
+ .bank = 0x04,
+ .addr = 0x13,
+ },
+ [AB8500_REGU_VSMPS1_SEL2] = {
+ .name = "Vsmps1Sel2",
+ .bank = 0x04,
+ .addr = 0x14,
+ },
+ [AB8500_REGU_VSMPS1_SEL3] = {
+ .name = "Vsmps1Sel3",
+ .bank = 0x04,
+ .addr = 0x15,
+ },
+ [AB8500_REGU_VSMPS2_SEL1] = {
+ .name = "Vsmps2Sel1",
+ .bank = 0x04,
+ .addr = 0x17,
+ },
+ [AB8500_REGU_VSMPS2_SEL2] = {
+ .name = "Vsmps2Sel2",
+ .bank = 0x04,
+ .addr = 0x18,
+ },
+ [AB8500_REGU_VSMPS2_SEL3] = {
+ .name = "Vsmps2Sel3",
+ .bank = 0x04,
+ .addr = 0x19,
+ },
+ [AB8500_REGU_VSMPS3_SEL1] = {
+ .name = "Vsmps3Sel1",
+ .bank = 0x04,
+ .addr = 0x1b,
+ },
+ [AB8500_REGU_VSMPS3_SEL2] = {
+ .name = "Vsmps3Sel2",
+ .bank = 0x04,
+ .addr = 0x1c,
+ },
+ [AB8500_REGU_VSMPS3_SEL3] = {
+ .name = "Vsmps3Sel3",
+ .bank = 0x04,
+ .addr = 0x1d,
+ },
+ [AB8500_REGU_VAUX1_SEL] = {
+ .name = "Vaux1Sel",
+ .bank = 0x04,
+ .addr = 0x1f,
+ },
+ [AB8500_REGU_VAUX2_SEL] = {
+ .name = "Vaux2Sel",
+ .bank = 0x04,
+ .addr = 0x20,
+ },
+ [AB8500_REGU_VRF1_VAUX3_SEL] = {
+ .name = "VRF1Vaux3Sel",
+ .bank = 0x04,
+ .addr = 0x21,
+ },
+ [AB8500_REGU_CTRL_EXT_SUP] = {
+ .name = "ReguCtrlExtSup",
+ .bank = 0x04,
+ .addr = 0x22,
+ },
+ [AB8500_REGU_VMOD_REGU] = {
+ .name = "VmodRegu",
+ .bank = 0x04,
+ .addr = 0x40,
+ },
+ [AB8500_REGU_VMOD_SEL1] = {
+ .name = "VmodSel1",
+ .bank = 0x04,
+ .addr = 0x41,
+ },
+ [AB8500_REGU_VMOD_SEL2] = {
+ .name = "VmodSel2",
+ .bank = 0x04,
+ .addr = 0x42,
+ },
+ [AB8500_REGU_CTRL_DISCH] = {
+ .name = "ReguCtrlDisch",
+ .bank = 0x04,
+ .addr = 0x43,
+ },
+ [AB8500_REGU_CTRL_DISCH2] = {
+ .name = "ReguCtrlDisch2",
+ .bank = 0x04,
+ .addr = 0x44,
+ },
+ /* Outside regulator banks */
+ [AB8500_OTHER_SYSCLK_CTRL] = {
+ .name = "SysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x0c,
+ },
+ [AB8500_OTHER_VSIM_SYSCLK_CTRL] = {
+ .name = "VsimSysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x33,
+ },
+ [AB8500_OTHER_SYSULPCLK_CTRL1] = {
+ .name = "SysUlpClkCtrl1",
+ .bank = 0x02,
+ .addr = 0x0b,
+ },
+ [AB8500_OTHER_TVOUT_CTRL] = {
+ .name = "TVoutCtrl",
+ .bank = 0x06,
+ .addr = 0x80,
+ },
+};
+
+static u8 ab8500_register_state[NUM_REGULATOR_STATE][NUM_AB8500_REGISTER];
+static bool ab8500_register_state_saved[NUM_REGULATOR_STATE];
+static bool ab8500_register_state_save = true;
+
+static int ab8500_regulator_record_state(int state)
+{
+ u8 val;
+ int i;
+ int ret;
+
+ /* check arguments */
+ if ((state >= NUM_REGULATOR_STATE) || (state < 0)) {
+ dev_err(dev, "Wrong state specified\n");
+ return -EINVAL;
+ }
+
+ /* record */
+ if (!ab8500_register_state_save)
+ goto exit;
+
+ ab8500_register_state_saved[state] = true;
+
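+ /* Index 0 is AB8500_REGU_NOUSE, so start from 1 */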
+ for (i = 1; i < NUM_AB8500_REGISTER; i++) {
+ ret = abx500_get_register_interruptible(dev,
+ ab8500_register[i].bank,
+ ab8500_register[i].addr,
+ &val);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ ab8500_register_state[state][i] = val;
+ }
+exit:
+ return 0;
+}
+
+/*
+ * regulator register dump
+ */
+static int ab8500_regulator_dump_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int state, reg_id, i;
+ int err;
+
+ /* record current state */
+ ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT);
+
+ /* print dump header */
+ err = seq_printf(s, "ab8500-regulator dump:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print states */
+ for (state = NUM_REGULATOR_STATE - 1; state >= 0; state--) {
+ if (ab8500_register_state_saved[state])
+ err = seq_printf(s, "%16s saved -------",
+ regulator_state_name[state]);
+ else
+ err = seq_printf(s, "%12s not saved -------",
+ regulator_state_name[state]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ for (i = 0; i < NUM_REGULATOR_STATE; i++) {
+ if (i < state)
+ err = seq_printf(s, "-----");
+ else if (i == state)
+ err = seq_printf(s, "----+");
+ else
+ err = seq_printf(s, " |");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n",
+ __LINE__);
+ }
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ }
+
+ /* print labels */
+ err = seq_printf(s, "\n addr\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ /* dump registers */
+ for (reg_id = 1; reg_id < NUM_AB8500_REGISTER; reg_id++) {
+ err = seq_printf(s, "%22s 0x%02x%02x:",
+ ab8500_register[reg_id].name,
+ ab8500_register[reg_id].bank,
+ ab8500_register[reg_id].addr);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+
+ for (state = 0; state < NUM_REGULATOR_STATE; state++) {
+ err = seq_printf(s, " 0x%02x",
+ ab8500_register_state[state][reg_id]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+ }
+
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+ }
+
+ return 0;
+}
+
+static int ab8500_regulator_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_regulator_dump_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_dump_fops = {
+ .open = ab8500_regulator_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * regulator status print
+ */
+enum ab8500_regulator_id {
+ AB8500_VARM,
+ AB8500_VBBP,
+ AB8500_VBBN,
+ AB8500_VAPE,
+ AB8500_VSMPS1,
+ AB8500_VSMPS2,
+ AB8500_VSMPS3,
+ AB8500_VPLL,
+ AB8500_VREFDDR,
+ AB8500_VMOD,
+ AB8500_VEXTSUPPLY1,
+ AB8500_VEXTSUPPLY2,
+ AB8500_VEXTSUPPLY3,
+ AB8500_VRF1,
+ AB8500_VANA,
+ AB8500_VAUX1,
+ AB8500_VAUX2,
+ AB8500_VAUX3,
+ AB8500_VINTCORE,
+ AB8500_VTVOUT,
+ AB8500_VAUDIO,
+ AB8500_VANAMIC1,
+ AB8500_VANAMIC2,
+ AB8500_VDMIC,
+ AB8500_VUSB,
+ AB8500_VOTG,
+ AB8500_VBUSBIS,
+ AB8500_NUM_REGULATORS,
+};
+
+/*
+ * regulator_voltage
+ */
+struct regulator_volt {
+ u8 value;
+ int volt;
+};
+
+struct regulator_volt_range {
+ struct regulator_volt start;
+ struct regulator_volt step;
+ struct regulator_volt end;
+};
+
+/*
+ * ab8500_regulator
+ * @name
+ * @update_regid
+ * @update_mask
+ * @update_val[4] {off, on, hw, lp}
+ * @hw_mode_regid
+ * @hw_mode_mask
+ * @hw_mode_val[4] {hp/lp, hp/off, hp, hp}
+ * @hw_valid_regid[4] {sysclkreq1, hw1, hw2, sw}
+ * @hw_valid_mask[4] {sysclkreq1, hw1, hw2, sw}
+ * @vsel_sel_regid
+ * @vsel_sel_mask
+ * @vsel_sel_val[4] {sel1, sel2, sel3, sel3}
+ * @vsel_regid
+ * @vsel_mask
+ * @vsel_range
+ * @vsel_range_len
+ */
+struct ab8500_regulator {
+ const char *name;
+ int update_regid;
+ u8 update_mask;
+ u8 update_val[4];
+ int hw_mode_regid;
+ u8 hw_mode_mask;
+ u8 hw_mode_val[4];
+ int hw_valid_regid[4];
+ u8 hw_valid_mask[4];
+ int vsel_sel_regid;
+ u8 vsel_sel_mask;
+ u8 vsel_sel_val[4];
+ int vsel_regid[3];
+ u8 vsel_mask[3];
+ struct regulator_volt_range const *vsel_range[3];
+ int vsel_range_len[3];
+};
+
+static const char *update_val_name[] = {
+ "off",
+ "on ",
+ "hw ",
+ "lp ",
+ " - " /* undefined value */
+};
+
+static const char *hw_mode_val_name[] = {
+ "hp/lp ",
+ "hp/off",
+ "hp ",
+ "hp ",
+ "-/- ", /* undefined value */
+};
+
+/* voltage selection */
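+/* Each range is { start, step, end }, given as { register value, voltage } pairs */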
+static const struct regulator_volt_range varm_vape_vmod_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} },
+ { {0x36, 1362500}, {0x01, 0}, {0x3f, 1362500} },
+};
+
+static const struct regulator_volt_range vbbp_vsel[] = {
+ { {0x00, 0}, {0x10, 100000}, {0x40, 400000} },
+ { {0x50, 400000}, {0x10, 0}, {0x70, 400000} },
+ { {0x80, -400000}, {0x10, 0}, {0xb0, -400000} },
+ { {0xc0, -400000}, {0x10, 100000}, {0xf0, -100000} },
+};
+
+static const struct regulator_volt_range vbbn_vsel[] = {
+ { {0x00, 0}, {0x01, -100000}, {0x04, -400000} },
+ { {0x05, -400000}, {0x01, 0}, {0x07, -400000} },
+ { {0x08, 0}, {0x01, 100000}, {0x0c, 400000} },
+ { {0x0d, 400000}, {0x01, 0}, {0x0f, 400000} },
+};
+
+static const struct regulator_volt_range vsmps1_vsel[] = {
+ { {0x00, 1100000}, {0x01, 0}, {0x1f, 1100000} },
+ { {0x20, 1100000}, {0x01, 12500}, {0x30, 1300000} },
+ { {0x31, 1300000}, {0x01, 0}, {0x3f, 1300000} },
+};
+
+static const struct regulator_volt_range vsmps2_vsel[] = {
+ { {0x00, 1800000}, {0x01, 0}, {0x38, 1800000} },
+ { {0x39, 1800000}, {0x01, 12500}, {0x7f, 1875000} },
+};
+
+static const struct regulator_volt_range vsmps3_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1363500} },
+ { {0x36, 1363500}, {0x01, 0}, {0x7f, 1363500} },
+};
+
+static const struct regulator_volt_range vaux1_vaux2_vsel[] = {
+ { {0x00, 1100000}, {0x01, 100000}, {0x04, 1500000} },
+ { {0x05, 1800000}, {0x01, 50000}, {0x07, 1900000} },
+ { {0x08, 2500000}, {0x01, 0}, {0x08, 2500000} },
+ { {0x09, 2650000}, {0x01, 50000}, {0x0c, 2800000} },
+ { {0x0d, 2900000}, {0x01, 100000}, {0x0e, 3000000} },
+ { {0x0f, 3300000}, {0x01, 0}, {0x0f, 3300000} },
+};
+
+static const struct regulator_volt_range vaux3_vsel[] = {
+ { {0x00, 1200000}, {0x01, 300000}, {0x03, 2100000} },
+ { {0x04, 2500000}, {0x01, 250000}, {0x05, 2750000} },
+ { {0x06, 2790000}, {0x01, 0}, {0x06, 2790000} },
+ { {0x07, 2910000}, {0x01, 0}, {0x07, 2910000} },
+};
+
+static const struct regulator_volt_range vrf1_vsel[] = {
+ { {0x00, 1800000}, {0x10, 200000}, {0x10, 2000000} },
+ { {0x20, 2150000}, {0x10, 0}, {0x20, 2150000} },
+ { {0x30, 2500000}, {0x10, 0}, {0x30, 2500000} },
+};
+
+static const struct regulator_volt_range vintcore12_vsel[] = {
+ { {0x00, 1200000}, {0x08, 25000}, {0x30, 1350000} },
+ { {0x38, 1350000}, {0x01, 0}, {0x38, 1350000} },
+};
+
+/* regulators */
+static struct ab8500_regulator ab8500_regulator[AB8500_NUM_REGULATORS] = {
+ [AB8500_VARM] = {
+ .name = "Varm",
+ .update_regid = AB8500_REGU_ARM_REGU1,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x02,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VARM_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = varm_vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VARM_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = varm_vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[2] = AB8500_REGU_VARM_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = varm_vape_vmod_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ },
+ [AB8500_VBBP] = {
+ .name = "Vbbp",
+ .update_regid = AB8500_REGU_ARM_REGU2,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x00},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x10,
+ .vsel_sel_val = {0x00, 0x10, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VBB_SEL1,
+ .vsel_mask[0] = 0xf0,
+ .vsel_range[0] = vbbp_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vbbp_vsel),
+ .vsel_regid[1] = AB8500_REGU_VBB_SEL2,
+ .vsel_mask[1] = 0xf0,
+ .vsel_range[1] = vbbp_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vbbp_vsel),
+ },
+ [AB8500_VBBN] = {
+ .name = "Vbbn",
+ .update_regid = AB8500_REGU_ARM_REGU2,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x00},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x20,
+ .vsel_sel_val = {0x00, 0x20, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VBB_SEL1,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vbbn_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vbbn_vsel),
+ .vsel_regid[1] = AB8500_REGU_VBB_SEL2,
+ .vsel_mask[1] = 0x0f,
+ .vsel_range[1] = vbbn_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vbbn_vsel),
+ },
+ [AB8500_VAPE] = {
+ .name = "Vape",
+ .update_regid = AB8500_REGU_VAPE_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_sel_regid = AB8500_REGU_VAPE_REGU,
+ .vsel_sel_mask = 0x24,
+ .vsel_sel_val = {0x00, 0x04, 0x20, 0x24},
+ .vsel_regid[0] = AB8500_REGU_VAPE_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = varm_vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VAPE_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = varm_vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[2] = AB8500_REGU_VAPE_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = varm_vape_vmod_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ },
+ [AB8500_VSMPS1] = {
+ .name = "Vsmps1",
+ .update_regid = AB8500_REGU_VSMPS1_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x01,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x01,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_VSMPS1_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS1_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vsmps1_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps1_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS1_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vsmps1_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps1_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS1_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vsmps1_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps1_vsel),
+ },
+ [AB8500_VSMPS2] = {
+ .name = "Vsmps2",
+ .update_regid = AB8500_REGU_VSMPS2_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x02,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x02,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x08,
+ .vsel_sel_regid = AB8500_REGU_VSMPS2_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS2_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vsmps2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps2_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS2_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vsmps2_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps2_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS2_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vsmps2_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps2_vsel),
+ },
+ [AB8500_VSMPS3] = {
+ .name = "Vsmps3",
+ .update_regid = AB8500_REGU_VSMPS3_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x04,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x04,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x10,
+ .vsel_sel_regid = AB8500_REGU_VSMPS3_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS3_SEL1,
+ .vsel_mask[0] = 0x7f,
+ .vsel_range[0] = vsmps3_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps3_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS3_SEL2,
+ .vsel_mask[1] = 0x7f,
+ .vsel_range[1] = vsmps3_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps3_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS3_SEL3,
+ .vsel_mask[2] = 0x7f,
+ .vsel_range[2] = vsmps3_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps3_vsel),
+ },
+ [AB8500_VPLL] = {
+ .name = "Vpll",
+ .update_regid = AB8500_REGU_VPLL_VANA_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x10,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x10,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x10,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x40,
+ },
+ [AB8500_VREFDDR] = {
+ .name = "VrefDDR",
+ .update_regid = AB8500_REGU_VREF_DDR,
+ .update_mask = 0x01,
+ .update_val = {0x00, 0x01, 0x00, 0x00},
+ },
+ [AB8500_VMOD] = {
+ .name = "Vmod",
+ .update_regid = AB8500_REGU_VMOD_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_VMOD_REGU,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x08,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x08,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x20,
+ .vsel_sel_regid = AB8500_REGU_VMOD_REGU,
+ .vsel_sel_mask = 0x04,
+ .vsel_sel_val = {0x00, 0x04, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VMOD_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = varm_vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VMOD_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = varm_vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ },
+ [AB8500_VEXTSUPPLY1] = {
+ .name = "Vextsupply1",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x10,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x01,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x04,
+ },
+ [AB8500_VEXTSUPPLY2] = {
+ .name = "VextSupply2",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x20,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x02,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x08,
+ },
+ [AB8500_VEXTSUPPLY3] = {
+ .name = "VextSupply3",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x30,
+ .update_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x40,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x04,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x10,
+ },
+ [AB8500_VRF1] = {
+ .name = "Vrf1",
+ .update_regid = AB8500_REGU_VRF1_VAUX3_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL,
+ .vsel_mask[0] = 0x30,
+ .vsel_range[0] = vrf1_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vrf1_vsel),
+ },
+ [AB8500_VANA] = {
+ .name = "Vana",
+ .update_regid = AB8500_REGU_VPLL_VANA_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x08,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x08,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x20,
+ },
+ [AB8500_VAUX1] = {
+ .name = "Vaux1",
+ .update_regid = AB8500_REGU_VAUX12_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x20,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x20,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x20,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x80,
+ .vsel_regid[0] = AB8500_REGU_VAUX1_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vaux1_vaux2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux1_vaux2_vsel),
+ },
+ [AB8500_VAUX2] = {
+ .name = "Vaux2",
+ .update_regid = AB8500_REGU_VAUX12_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x40,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x40,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x40,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_regid[0] = AB8500_REGU_VAUX2_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vaux1_vaux2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux1_vaux2_vsel),
+ },
+ [AB8500_VAUX3] = {
+ .name = "Vaux3",
+ .update_regid = AB8500_REGU_VRF1_VAUX3_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL4,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x80,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x80,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x80,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x02,
+ .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL,
+ .vsel_mask[0] = 0x07,
+ .vsel_range[0] = vaux3_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux3_vsel),
+ },
+ [AB8500_VINTCORE] = {
+ .name = "VintCore12",
+ .update_regid = AB8500_REGU_MISC1,
+ .update_mask = 0x44,
+ .update_val = {0x00, 0x04, 0x00, 0x44},
+ .vsel_regid[0] = AB8500_REGU_MISC1,
+ .vsel_mask[0] = 0x38,
+ .vsel_range[0] = vintcore12_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vintcore12_vsel),
+ },
+ [AB8500_VTVOUT] = {
+ .name = "VTVout",
+ .update_regid = AB8500_REGU_MISC1,
+ .update_mask = 0x82,
+ .update_val = {0x00, 0x02, 0x00, 0x82},
+ },
+ [AB8500_VAUDIO] = {
+ .name = "Vaudio",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x02,
+ .update_val = {0x00, 0x02, 0x00, 0x00},
+ },
+ [AB8500_VANAMIC1] = {
+ .name = "Vanamic1",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x08,
+ .update_val = {0x00, 0x08, 0x00, 0x00},
+ },
+ [AB8500_VANAMIC2] = {
+ .name = "Vanamic2",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x10,
+ .update_val = {0x00, 0x10, 0x00, 0x00},
+ },
+ [AB8500_VDMIC] = {
+ .name = "Vdmic",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x04,
+ .update_val = {0x00, 0x04, 0x00, 0x00},
+ },
+ [AB8500_VUSB] = {
+ .name = "Vusb",
+ .update_regid = AB8500_REGU_VUSB_CTRL,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x00, 0x03},
+ },
+ [AB8500_VOTG] = {
+ .name = "VOTG",
+ .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x00, 0x03},
+ },
+ [AB8500_VBUSBIS] = {
+ .name = "Vbusbis",
+ .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL,
+ .update_mask = 0x08,
+ .update_val = {0x00, 0x08, 0x00, 0x00},
+ },
+};
+
+static int status_state;
+
+static int _get_voltage(struct regulator_volt_range const *volt_range,
+ u8 value, int *volt)
+{
+ u8 start = volt_range->start.value;
+ u8 end = volt_range->end.value;
+ u8 step = volt_range->step.value;
+
+ /* Check if value is within range */
+ if (step == 0) {
+ if (value == start) {
+ *volt = volt_range->start.volt;
+ return 1;
+ }
+ } else {
+ if ((start <= value) && (value <= end)) {
+ if ((value - start) % step != 0)
+ return -EINVAL; /* invalid setting */
+ *volt = volt_range->start.volt
+ + volt_range->step.volt
+ * ((value - start) / step);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int get_voltage(struct regulator_volt_range const *volt_range,
+ int volt_range_len,
+ u8 value)
+{
+ int volt;
+ int i, ret;
+
+ for (i = 0; i < volt_range_len; i++) {
+ ret = _get_voltage(&volt_range[i], value, &volt);
+ if (ret < 0)
+ break; /* invalid setting */
+ if (ret == 1)
+ return volt; /* successful */
+ }
+
+ return -EINVAL;
+}
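+
+/*
+ * Worked example (illustrative only, not part of the driver): assume a
+ * hypothetical entry in one of the vsel_range tables of
+ *     start = { .value = 0x00, .volt = 1100000 },
+ *     step  = { .value = 0x01, .volt =  100000 },
+ *     end   = { .value = 0x06, .volt = 1700000 }.
+ * A register value of 0x04 lies within [start, end] and is a multiple of
+ * the step, so _get_voltage() returns
+ *     1100000 + 100000 * ((0x04 - 0x00) / 0x01) = 1500000 uV,
+ * while a value matching no range makes get_voltage() return -EINVAL.
+ */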
+
+static int ab8500_regulator_status_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int id, regid;
+ int i;
+ u8 val;
+ int err;
+
+ /* record current state */
+ ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT);
+
+ /* check if chosen state is recorded */
+ if (!ab8500_register_state_saved[status_state]) {
+ seq_printf(s, "ab8500-regulator status is not recorded.\n");
+ goto exit;
+ }
+
+ /* print dump header */
+ err = seq_printf(s, "ab8500-regulator status:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print state */
+ err = seq_printf(s, "%12s\n",
+ regulator_state_name[status_state]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print labels */
+ err = seq_printf(s,
+ "+-----------+----+--------------+-------------------------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "| name|man |auto |voltage |\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "+-----------+----+--------------+ +-----------------------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "| |mode|mode |0|1|2|3| | 1 | 2 | 3 |\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ /* dump registers */
+ for (id = 0; id < AB8500_NUM_REGULATORS; id++) {
+ /* print name */
+ err = seq_printf(s, "|%11s|",
+ ab8500_regulator[id].name);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print manual mode */
+ regid = ab8500_regulator[id].update_regid;
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].update_mask;
+ for (i = 0; i < 4; i++) {
+ if (val == ab8500_regulator[id].update_val[i])
+ break;
+ }
+ err = seq_printf(s, "%4s|",
+ update_val_name[i]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print auto mode */
+ regid = ab8500_regulator[id].hw_mode_regid;
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].hw_mode_mask;
+ for (i = 0; i < 4; i++) {
+ if (val == ab8500_regulator[id].hw_mode_val[i])
+ break;
+ }
+ err = seq_printf(s, "%6s|",
+ hw_mode_val_name[i]);
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print valid bits */
+ for (i = 0; i < 4; i++) {
+ regid = ab8500_regulator[id].hw_valid_regid[i];
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].hw_valid_mask[i];
+ if (val)
+ err = seq_printf(s, "1|");
+ else
+ err = seq_printf(s, "0|");
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+ }
+
+ /* print voltage selection */
+ regid = ab8500_regulator[id].vsel_sel_regid;
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].vsel_sel_mask;
+ for (i = 0; i < 3; i++) {
+ if (val == ab8500_regulator[id].vsel_sel_val[i])
+ break;
+ }
+ if (i < 3)
+ err = seq_printf(s, "%i|", i + 1);
+ else
+ err = seq_printf(s, "-|");
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+
+ for (i = 0; i < 3; i++) {
+ int volt;
+
+ regid = ab8500_regulator[id].vsel_regid[i];
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].vsel_mask[i];
+ volt = get_voltage(
+ ab8500_regulator[id].vsel_range[i],
+ ab8500_regulator[id].vsel_range_len[i],
+ val);
+ seq_printf(s, "%7i|", volt);
+ } else {
+ seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+ }
+
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+
+ }
+ err = seq_printf(s,
+ "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "Note! In HW mode, voltage selection is controlled by HW.\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+
+exit:
+ return 0;
+}
+
+static ssize_t ab8500_regulator_status_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* copy user data */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* convert */
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+
+ /* set suspend force setting */
+ if (user_val >= NUM_REGULATOR_STATE) {
+ dev_err(dev, "debugfs error: state index out of range\n");
+ return -EINVAL;
+ }
+
+ status_state = user_val;
+
+ return buf_size;
+}
+
+
+static int ab8500_regulator_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_regulator_status_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_status_fops = {
+ .open = ab8500_regulator_status_open,
+ .write = ab8500_regulator_status_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
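+
+/*
+ * Usage note (illustrative, assuming debugfs is mounted at
+ * /sys/kernel/debug): the "status" file registered with these fops in
+ * ab8500_regulator_debug_probe() prints the table assembled in
+ * ab8500_regulator_status_print(), and writing a state index to it (see
+ * ab8500_regulator_status_write()) selects which recorded snapshot is
+ * shown on the next read.
+ */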
+
+#ifdef CONFIG_PM
+
+struct ab8500_force_reg {
+ char *name;
+ u8 bank;
+ u8 addr;
+ u8 mask;
+ u8 val;
+ bool restore;
+ u8 restore_val;
+};
+
+static struct ab8500_force_reg ab8500_force_reg[] = {
+ {
+ /*
+ * SysClkCtrl
+ * OTP: 0x00, HSI: 0x06, suspend: 0x00/0x07 (value/mask)
+ * [ 2] USBClkEna = disable SysClk path to USB block
+ * [ 1] TVoutClkEna = disable 27 MHz clock to TVout block
+ * [ 0] TVoutPllEna = disable TVout PLL
+ * (generates 27 MHz from SysClk)
+ */
+ .name = "SysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x0c,
+ .mask = 0x07,
+ .val = 0x00,
+ },
+ {
+ /*
+ * ReguSysClkReq1HPValid2
+ * OTP: 0x03, HSI: 0x40, suspend: 0x60/0x70 (value/mask)
+ * [ 5] VextSupply2SysClkReq1HPValid = Vext2 set by SysClkReq1
+ */
+ .name = "ReguSysClkReq1HPValid2",
+ .bank = 0x03,
+ .addr = 0x08,
+ .mask = 0x20, /* test and compare with 0x7f */
+ .val = 0x20,
+ },
+ {
+ /*
+ * ReguRequestCtrl3
+ * OTP: 0x00, HSI: 0x00, suspend: 0x05/0x0f (value/mask)
+ * [1:0] VExtSupply2RequestCtrl[1:0] = VExt2 set in HP/OFF mode
+ */
+ .name = "ReguRequestCtrl3",
+ .bank = 0x03,
+ .addr = 0x05,
+ .mask = 0x03, /* test and compare with 0xff */
+ .val = 0x01,
+ },
+ {
+ /*
+ * VsimSysClkCtrl
+ * OTP: 0x01, HSI: 0x21, suspend: 0x01/0xff (value/mask)
+ * [ 7] VsimSysClkReq8Valid = no connection
+ * [ 6] VsimSysClkReq7Valid = no connection
+ * [ 5] VsimSysClkReq6Valid = no connection
+ * [ 4] VsimSysClkReq5Valid = no connection
+ * [ 3] VsimSysClkReq4Valid = no connection
+ * [ 2] VsimSysClkReq3Valid = no connection
+ * [ 1] VsimSysClkReq2Valid = no connection
+ * [ 0] VsimSysClkReq1Valid = Vsim set by SysClkReq1
+ */
+ .name = "VsimSysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x33,
+ .mask = 0xff,
+ .val = 0x01,
+ },
+ {
+ /*
+ * SysUlpClkCtrl1
+ * OTP: 0x00, HSI: 0x00, suspend: 0x00/0x0f (value/mask)
+ * [ 3] 4500SysClkReq = inactive
+ * [ 2] UlpClkReq = inactive
+ * [1:0] SysUlpClkIntSel[1:0] = no internal clock switching.
+ * Internal clock is SysClk.
+ */
+ .name = "SysUlpClkCtrl1",
+ .bank = 0x02,
+ .addr = 0x0b,
+ .mask = 0x0f,
+ .val = 0x00,
+ },
+ {
+ /*
+ * ExtSupplyRegu (HSI: 0x2a on v2-v40?)
+ * OTP: 0x15, HSI: 0x28, suspend: 0x28/0x3f (value/mask)
+ * [3:2] VExtSupply2Regu[1:0] = 10 = Vext2 in HW control
+ * [1:0] VExtSupply1Regu[1:0] = 00 = Vext1 off
+ */
+ .name = "ExtSupplyRegu",
+ .bank = 0x04,
+ .addr = 0x08,
+ .mask = 0x0f,
+ .val = 0x08,
+ },
+ {
+ /*
+ * TVoutCtrl
+ * OTP: N/A, HSI: N/A, suspend: 0x00/0x03 (value/mask)
+ * [ 2] PlugTvOn = plug/unplug detection disabled
+ * [1:0] TvoutDacCtrl[1:0] = "0" forced on DAC input (test)
+ */
+ .name = "TVoutCtrl",
+ .bank = 0x06,
+ .addr = 0x80,
+ .mask = 0x03,
+ .val = 0x00,
+ },
+};
+
+void ab8500_regulator_debug_force(void)
+{
+ int ret, i;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_SUSPEND);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record suspend state.\n");
+
+ /* check if registers should be forced */
+ if (!setting_suspend_force)
+ goto exit;
+
+ /*
+ * Optimize href v2_v50_pwr board for ApSleep/ApDeepSleep
+ * power consumption measurements
+ */
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) {
+ dev_vdbg(&pdev->dev, "Save and set %s: "
+ "0x%02x, 0x%02x, 0x%02x, 0x%02x.\n",
+ ab8500_force_reg[i].name,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].val);
+
+ /* assume that register should be restored */
+ ab8500_force_reg[i].restore = true;
+
+ /* get register value before forcing it */
+ ret = abx500_get_register_interruptible(&pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ &ab8500_force_reg[i].restore_val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read %s.\n",
+ ab8500_force_reg[i].name);
+ ab8500_force_reg[i].restore = false;
+ break;
+ }
+
+ /* force register value */
+ ret = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].val);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to write %s.\n",
+ ab8500_force_reg[i].name);
+ ab8500_force_reg[i].restore = false;
+ }
+ }
+
+exit:
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(
+ AB8500_REGULATOR_STATE_SUSPEND_CORE);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record suspend state.\n");
+
+ return;
+}
+
+void ab8500_regulator_debug_restore(void)
+{
+ int ret, i;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME_CORE);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record resume state.\n");
+ for (i = ARRAY_SIZE(ab8500_force_reg) - 1; i >= 0; i--) {
+ /* restore register value */
+ if (ab8500_force_reg[i].restore) {
+ ret = abx500_mask_and_set_register_interruptible(
+ &pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].restore_val);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to restore %s.\n",
+ ab8500_force_reg[i].name);
+ dev_vdbg(&pdev->dev, "Restore %s: "
+ "0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ ab8500_force_reg[i].name,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].restore_val);
+ }
+ }
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record resume state.\n");
+
+ return;
+}
+
+#endif
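+
+/*
+ * Minimal usage sketch (assumption: the platform PM code, not this file,
+ * is responsible for wiring up these hooks; the function name below is
+ * hypothetical):
+ *
+ *	static int example_ux500_suspend_enter(suspend_state_t state)
+ *	{
+ *		ab8500_regulator_debug_force();
+ *		... enter ApSleep/ApDeepSleep ...
+ *		ab8500_regulator_debug_restore();
+ *		return 0;
+ *	}
+ *
+ * ab8500_regulator_debug_force() records the suspend state and applies
+ * ab8500_force_reg[] when suspend force is enabled;
+ * ab8500_regulator_debug_restore() writes the saved values back in
+ * reverse order.
+ */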
+
+static int ab8500_regulator_suspend_force_show(struct seq_file *s, void *p)
+{
+ /* print suspend standby status */
+ if (setting_suspend_force)
+ return seq_printf(s, "suspend force enabled\n");
+ else
+ return seq_printf(s, "no suspend force\n");
+}
+
+static ssize_t ab8500_regulator_suspend_force_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* copy user data */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* convert */
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+
+ /* set suspend force setting */
+ if (user_val > 1) {
+ dev_err(dev, "debugfs error input > 1\n");
+ return -EINVAL;
+ }
+
+ if (user_val)
+ setting_suspend_force = true;
+ else
+ setting_suspend_force = false;
+
+ return buf_size;
+}
+
+static int ab8500_regulator_suspend_force_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_regulator_suspend_force_show,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_suspend_force_fops = {
+ .open = ab8500_regulator_suspend_force_open,
+ .write = ab8500_regulator_suspend_force_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab8500_regulator_dir;
+static struct dentry *ab8500_regulator_dump_file;
+static struct dentry *ab8500_regulator_status_file;
+static struct dentry *ab8500_regulator_suspend_force_file;
+
+static int __devinit ab8500_regulator_debug_probe(struct platform_device *plf)
+{
+ void __iomem *boot_info_backupram;
+ int ret, i;
+
+ /* setup dev pointers */
+ dev = &plf->dev;
+ pdev = plf;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_INIT);
+ if (ret < 0)
+ dev_err(&plf->dev, "Failed to record init state.\n");
+
+ /* remove force of external regulators if AB8500 3.0 and DB8500 v2.2 */
+ if ((abx500_get_chip_id(&pdev->dev) >= 0x30) && cpu_is_u8500v22()) {
+ /*
+ * find ExtSupplyRegu register (bank 0x04, addr 0x08)
+ * and update value (Vext1 in low-power, Vext2 off).
+ */
+ for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) {
+ if (ab8500_force_reg[i].bank == 0x04 &&
+ ab8500_force_reg[i].addr == 0x08) {
+ u8 val, val_mask = 0x0f;
+
+ val = ab8500_force_reg[i].val;
+ val = (val & ~val_mask) | (0x03 & val_mask);
+ ab8500_force_reg[i].val = val;
+ }
+ }
+ }
+
+ /* make suspend-force default if board profile is v5x-power */
+ boot_info_backupram = ioremap(BOOT_INFO_BACKUPRAM1, 0x4);
+
+ if (boot_info_backupram) {
+ u8 board_profile;
+ board_profile = readb(
+ boot_info_backupram + BOARD_PROFILE_BACKUPRAM1);
+ dev_dbg(dev, "Board profile is 0x%02x\n", board_profile);
+
+ if (board_profile >= OPTION_BOARD_VERSION_V5X)
+ setting_suspend_force = true;
+
+ iounmap(boot_info_backupram);
+ } else {
+ dev_err(dev, "Failed to read backupram.\n");
+ }
+
+ /* create directory */
+ ab8500_regulator_dir = debugfs_create_dir("ab8500-regulator", NULL);
+ if (!ab8500_regulator_dir)
+ goto exit_no_debugfs;
+
+ /* create "dump" file */
+ ab8500_regulator_dump_file = debugfs_create_file("dump",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_dump_fops);
+ if (!ab8500_regulator_dump_file)
+ goto exit_destroy_dir;
+
+ /* create "status" file */
+ ab8500_regulator_status_file = debugfs_create_file("status",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_status_fops);
+ if (!ab8500_regulator_status_file)
+ goto exit_destroy_dump_file;
+
+ /*
+ * create "suspend-force-v5x" file. As indicated by the name, this is
+ * only applicable for v2_v5x hardware versions.
+ */
+ ab8500_regulator_suspend_force_file = debugfs_create_file(
+ "suspend-force-v5x",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_suspend_force_fops);
+ if (!ab8500_regulator_suspend_force_file)
+ goto exit_destroy_status_file;
+
+ return 0;
+
+exit_destroy_status_file:
+ debugfs_remove(ab8500_regulator_status_file);
+exit_destroy_dump_file:
+ debugfs_remove(ab8500_regulator_dump_file);
+exit_destroy_dir:
+ debugfs_remove(ab8500_regulator_dir);
+exit_no_debugfs:
+ dev_err(&plf->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
+}
+
+static int __devexit ab8500_regulator_debug_remove(struct platform_device *plf)
+{
+ debugfs_remove(ab8500_regulator_suspend_force_file);
+ debugfs_remove(ab8500_regulator_status_file);
+ debugfs_remove(ab8500_regulator_dump_file);
+ debugfs_remove(ab8500_regulator_dir);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_regulator_debug_driver = {
+ .driver = {
+ .name = "ab8500-regulator-debug",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_regulator_debug_probe,
+ .remove = __devexit_p(ab8500_regulator_debug_remove),
+};
+
+static int __init ab8500_regulator_debug_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ab8500_regulator_debug_driver);
+ if (ret)
+ pr_err("Failed to register ab8500 regulator: %d\n", ret);
+
+ return ret;
+}
+subsys_initcall(ab8500_regulator_debug_init);
+
+static void __exit ab8500_regulator_debug_exit(void)
+{
+ platform_driver_unregister(&ab8500_regulator_debug_driver);
+}
+module_exit(ab8500_regulator_debug_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com");
+MODULE_DESCRIPTION("AB8500 Regulator Debug");
+MODULE_ALIAS("platform:ab8500-regulator-debug");
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
new file mode 100644
index 00000000000..2bb57dbdbe0
--- /dev/null
+++ b/drivers/regulator/ab8500-ext.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ *
+ * This file is based on drivers/regulator/ab8500.c
+ *
+ * AB8500 external regulators
+ *
+ * ab8500-ext supports the following regulators:
+ * - VextSupply3
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/regulator/ab8500.h>
+
+/**
+ * struct ab8500_ext_regulator_info - ab8500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @regulator: regulator device
+ * @is_enabled: status of regulator (on/off)
+ * @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @update_bank: bank to control on/off
+ * @update_reg: register to control on/off
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_en: bits to set EN pin active (LPn pin inactive)
+ * normally this means high power mode
+ * @update_val_en_lp: bits to set EN pin active and LPn pin active
+ * normally this means low power mode
+ */
+struct ab8500_ext_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *regulator;
+ bool is_enabled;
+ int fixed_uV;
+ u8 update_bank;
+ u8 update_reg;
+ u8 update_mask;
+ u8 update_val;
+ u8 update_val_en;
+ u8 update_val_en_lp;
+};
+
+static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set enable bits for regulator\n");
+
+ info->is_enabled = true;
+
+ dev_dbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, 0x0);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set disable bits for regulator\n");
+
+ info->is_enabled = false;
+
+ dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, 0x0);
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_get_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, &regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read 0x%x register\n", info->update_reg);
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ if (regval & info->update_mask)
+ info->is_enabled = true;
+ else
+ info->is_enabled = false;
+
+ return info->is_enabled;
+}
+
+static int ab8500_ext_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ return info->fixed_uV;
+}
+
+static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ /* return the uV for the fixed regulators */
+ if (info->fixed_uV)
+ return info->fixed_uV;
+
+ return -EINVAL;
+}
+
+static struct regulator_ops ab8500_ext_regulator_ops = {
+ .enable = ab8500_ext_regulator_enable,
+ .disable = ab8500_ext_regulator_disable,
+ .is_enabled = ab8500_ext_regulator_is_enabled,
+ .get_voltage = ab8500_ext_fixed_get_voltage,
+ .list_voltage = ab8500_ext_list_voltage,
+};
+
+
+static struct ab8500_ext_regulator_info
+ ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = {
+ [AB8500_EXT_SUPPLY3] = {
+ .desc = {
+ .name = "VEXTSUPPLY3",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY3,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 3400000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x30,
+ .update_val = 0x10,
+ .update_val_en = 0x10,
+ .update_val_en_lp = 0x30,
+ },
+};
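+
+/*
+ * Board-side sketch (hypothetical names, not part of this patch): the
+ * entry above is matched against pdata->ext_regulator[AB8500_EXT_SUPPLY3]
+ * in ab8500_ext_regulator_init() below, so a board file is expected to
+ * provide something along the lines of
+ *
+ *	static struct regulator_init_data example_ext3_init = {
+ *		.constraints = {
+ *			.name = "vext-supply-3",
+ *			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ *		},
+ *	};
+ *
+ * referenced from ab8500_regulator_platform_data.ext_regulator, with
+ * num_ext_regulator set to AB8500_NUM_EXT_REGULATORS.
+ */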
+
+__devinit int ab8500_ext_regulator_init(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
+ int i, err;
+
+ if (!ab8500) {
+ dev_err(&pdev->dev, "null mfd parent\n");
+ return -EINVAL;
+ }
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
+ if (!pdata) {
+ dev_err(&pdev->dev, "null pdata\n");
+ return -EINVAL;
+ }
+
+ /* make sure the platform data has the correct size */
+ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
+ dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
+ return -EINVAL;
+ }
+
+ /* check for AB8500 2.x */
+ if (abx500_get_chip_id(&pdev->dev) < 0x30) {
+ struct ab8500_ext_regulator_info *info;
+
+ /* VextSupply3LPn is inverted on AB8500 2.x */
+ info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3];
+ info->update_val = 0x30;
+ info->update_val_en = 0x30;
+ info->update_val_en_lp = 0x10;
+ }
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+
+ /* assign per-regulator data */
+ info = &ab8500_ext_regulator_info[i];
+ info->dev = &pdev->dev;
+
+ /* register regulator with framework */
+ info->regulator = regulator_register(&info->desc, &pdev->dev,
+ &pdata->ext_regulator[i], info);
+ if (IS_ERR(info->regulator)) {
+ err = PTR_ERR(info->regulator);
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ /* when we fail, un-register all earlier regulators */
+ while (--i >= 0) {
+ info = &ab8500_ext_regulator_info[i];
+ regulator_unregister(info->regulator);
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->regulator),
+ "%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+__devexit int ab8500_ext_regulator_exit(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+ info = &ab8500_ext_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->regulator),
+ "%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->regulator);
+ }
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 external regulator driver");
+MODULE_ALIAS("platform:ab8500-ext-regulator");
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 6e1ae69646b..c1c84e4a048 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -21,43 +21,54 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
+#include <linux/mfd/ab8500/gpio.h> /* for sysclkreq pins */
/**
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
* @regulator_dev: regulator device
+ * @is_enabled: status of regulator (on/off)
* @max_uV: maximum voltage (for variable voltage supplies)
* @min_uV: minimum voltage (for variable voltage supplies)
* @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
- * @update_mask: mask to enable/disable regulator
- * @update_val_enable: bits to enable the regulator in normal (high power) mode
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_idle: bits to enable the regulator in idle (low power) mode
+ * @update_val_normal: bits to enable the regulator in normal (high power) mode
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
* @voltages: supported voltage table
* @voltages_len: number of supported voltages for the regulator
* @delay: startup/set voltage delay in us
+ * @gpio_pin: ab8500 gpio pin offset number (for sysclkreq regulator only)
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_dev *regulator;
+ bool is_enabled;
int max_uV;
int min_uV;
int fixed_uV;
+ int load_lp_uA;
u8 update_bank;
u8 update_reg;
u8 update_mask;
- u8 update_val_enable;
+ u8 update_val;
+ u8 update_val_idle;
+ u8 update_val_normal;
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
int const *voltages;
int voltages_len;
unsigned int delay;
+ enum ab8500_pin gpio_pin;
};
/* voltage tables for the vauxn/vintcore supplies */
@@ -113,15 +124,17 @@ static int ab8500_regulator_enable(struct regulator_dev *rdev)
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
+ info->is_enabled = true;
+
dev_vdbg(rdev_get_dev(rdev),
"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
return ret;
}
@@ -143,6 +156,8 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
+ info->is_enabled = false;
+
dev_vdbg(rdev_get_dev(rdev),
"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
@@ -151,6 +166,88 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
return ret;
}
+static unsigned int ab8500_regulator_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV,
+ int output_uV, int load_uA)
+{
+ unsigned int mode;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (load_uA <= info->load_lp_uA)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
+
+static int ab8500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ info->update_val = info->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ info->update_val = info->update_val_idle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info->is_enabled) {
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set regulator mode\n");
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ }
+
+ return ret;
+}
+
+static unsigned int ab8500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->update_val == info->update_val_normal)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (info->update_val == info->update_val_idle)
+ ret = REGULATOR_MODE_IDLE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
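+
+/*
+ * Consumer-side sketch (illustrative, assuming the board constraints
+ * allow REGULATOR_CHANGE_DRMS and REGULATOR_CHANGE_MODE): a driver
+ * holding one of the Vaux supplies can report its load, e.g.
+ *
+ *	regulator_set_optimum_mode(my_regulator, 2000);
+ *
+ * With load_lp_uA set to 5000 uA, a 2 mA load makes
+ * ab8500_regulator_get_optimum_mode() return REGULATOR_MODE_IDLE, and
+ * the core then calls ab8500_regulator_set_mode() to latch
+ * update_val_idle, so the regulator runs in low power mode while enabled.
+ */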
+
static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
{
int ret;
@@ -177,9 +274,11 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
info->update_mask, regval);
if (regval & info->update_mask)
- return true;
+ info->is_enabled = true;
else
- return false;
+ info->is_enabled = false;
+
+ return info->is_enabled;
}
static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
@@ -275,8 +374,13 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
*selector = ret;
+ /* vintcore register has a different layout */
+ if (info->desc.id == AB8500_LDO_INTCORE)
+ regval = ((u8)ret) << 3;
+ else
+ regval = (u8)ret;
+
/* set the registers for the request */
- regval = (u8)ret;
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->voltage_bank, info->voltage_reg,
info->voltage_mask, regval);
@@ -316,9 +420,12 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
return info->delay;
}
-static struct regulator_ops ab8500_regulator_ops = {
+static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage = ab8500_regulator_get_voltage,
.set_voltage = ab8500_regulator_set_voltage,
@@ -339,16 +446,115 @@ static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
return info->fixed_uV;
}
-static struct regulator_ops ab8500_regulator_fixed_ops = {
+static struct regulator_ops ab8500_regulator_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
.get_voltage = ab8500_fixed_get_voltage,
.list_voltage = ab8500_list_voltage,
.enable_time = ab8500_regulator_enable_time,
.set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
+static struct regulator_ops ab8500_regulator_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
+static int ab8500_sysclkreq_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, false);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set sysclkreq pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = true;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-enable (gpio_pin, gpio_select): %i, false\n",
+ info->desc.name, info->gpio_pin);
+
+ return ret;
+}
+
+static int ab8500_sysclkreq_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, true);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set gpio pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = false;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-disable (gpio_pin, gpio_select): %i, true\n",
+ info->desc.name, info->gpio_pin);
+
+ return ret;
+}
+
+static int ab8500_sysclkreq_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ bool gpio_select;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_get_select(info->dev, info->gpio_pin,
+ &gpio_select);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read gpio pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = !gpio_select;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-is_enabled (gpio_pin, is_enabled): %i, %i\n",
+ info->desc.name, info->gpio_pin, !gpio_select);
+
+ return info->is_enabled;
+}
+
+static struct regulator_ops ab8500_sysclkreq_ops = {
+ .enable = ab8500_sysclkreq_enable,
+ .disable = ab8500_sysclkreq_disable,
+ .is_enabled = ab8500_sysclkreq_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
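+
+/*
+ * Note on the SysClkReq "regulators" below: enabling one does not write a
+ * regulator register at all; it only switches the AB8500 GPIO pin given
+ * by gpio_pin back to its SysClkReq alternate function (gpio_select ==
+ * false), leaving the external supply under hardware control of the
+ * SysClkReq line. The fixed_uV of 1 is a placeholder since these switches
+ * expose no voltage of their own.
+ */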
+
static struct ab8500_regulator_info
ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
/*
@@ -360,7 +566,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX1] = {
.desc = {
.name = "LDO-AUX1",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
@@ -368,10 +574,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
@@ -381,7 +590,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX2] = {
.desc = {
.name = "LDO-AUX2",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
@@ -389,10 +598,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
@@ -402,7 +614,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX3] = {
.desc = {
.name = "LDO-AUX3",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
@@ -410,10 +622,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
@@ -423,7 +638,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_INTCORE] = {
.desc = {
.name = "LDO-INTCORE",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
@@ -431,10 +646,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
- .update_val_enable = 0x04,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
@@ -450,7 +668,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_TVOUT] = {
.desc = {
.name = "LDO-TVOUT",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
@@ -458,15 +676,18 @@ static struct ab8500_regulator_info
},
.delay = 10000,
.fixed_uV = 2000000,
+ .load_lp_uA = 1000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
},
[AB8500_LDO_USB] = {
.desc = {
.name = "LDO-USB",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_USB,
.owner = THIS_MODULE,
@@ -476,12 +697,14 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x82,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
},
[AB8500_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUDIO,
.owner = THIS_MODULE,
@@ -491,12 +714,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
},
[AB8500_LDO_ANAMIC1] = {
.desc = {
.name = "LDO-ANAMIC1",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC1,
.owner = THIS_MODULE,
@@ -506,12 +729,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
- .update_val_enable = 0x08,
+ .update_val = 0x08,
},
[AB8500_LDO_ANAMIC2] = {
.desc = {
.name = "LDO-ANAMIC2",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC2,
.owner = THIS_MODULE,
@@ -521,12 +744,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
- .update_val_enable = 0x10,
+ .update_val = 0x10,
},
[AB8500_LDO_DMIC] = {
.desc = {
.name = "LDO-DMIC",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_DMIC,
.owner = THIS_MODULE,
@@ -536,25 +759,58 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
},
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
[AB8500_LDO_ANA] = {
.desc = {
.name = "LDO-ANA",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
},
.fixed_uV = 1200000,
+ .load_lp_uA = 1000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
},
-
+ /*
+ * SysClkReq regulators
+ */
+ [AB8500_SYSCLKREQ_2] = {
+ .desc = {
+ .name = "SYSCLKREQ-2",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO1,
+ },
+ [AB8500_SYSCLKREQ_4] = {
+ .desc = {
+ .name = "SYSCLKREQ-4",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_4,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO3,
+ },
};
struct ab8500_reg_init {
@@ -572,11 +828,19 @@ struct ab8500_reg_init {
static struct ab8500_reg_init ab8500_reg_init[] = {
/*
+ * 0x03, VarmRequestCtrl
+ * 0x0c, VapeRequestCtrl
+ * 0x30, Vsmps1RequestCtrl
+ * 0xc0, Vsmps2RequestCtrl
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
+ /*
+ * 0x03, Vsmps3RequestCtrl
+ * 0x0c, VpllRequestCtrl
* 0x30, VanaRequestCtrl
- * 0x0C, VpllRequestCtrl
* 0xc0, VextSupply1RequestCtrl
*/
- REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc),
+ REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xff),
/*
* 0x03, VextSupply2RequestCtrl
* 0x0c, VextSupply3RequestCtrl
@@ -590,57 +854,82 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
/*
+ * 0x01, Vsmps1SysClkReq1HPValid
+ * 0x02, Vsmps2SysClkReq1HPValid
+ * 0x04, Vsmps3SysClkReq1HPValid
* 0x08, VanaSysClkReq1HPValid
+ * 0x10, VpllSysClkReq1HPValid
* 0x20, Vaux1SysClkReq1HPValid
* 0x40, Vaux2SysClkReq1HPValid
* 0x80, Vaux3SysClkReq1HPValid
*/
- REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xe8),
+ REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
/*
+ * 0x01, VapeSysClkReq1HPValid
+ * 0x02, VarmSysClkReq1HPValid
+ * 0x04, VbbSysClkReq1HPValid
+ * 0x08, VmodSysClkReq1HPValid
* 0x10, VextSupply1SysClkReq1HPValid
* 0x20, VextSupply2SysClkReq1HPValid
* 0x40, VextSupply3SysClkReq1HPValid
*/
- REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x70),
+ REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x7f),
/*
+ * 0x01, Vsmps1HwHPReq1Valid
+ * 0x02, Vsmps2HwHPReq1Valid
+ * 0x04, Vsmps3HwHPReq1Valid
* 0x08, VanaHwHPReq1Valid
+ * 0x10, VpllHwHPReq1Valid
* 0x20, Vaux1HwHPReq1Valid
* 0x40, Vaux2HwHPReq1Valid
* 0x80, Vaux3HwHPReq1Valid
*/
- REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xe8),
+ REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
/*
* 0x01, VextSupply1HwHPReq1Valid
* 0x02, VextSupply2HwHPReq1Valid
* 0x04, VextSupply3HwHPReq1Valid
+ * 0x08, VmodHwHPReq1Valid
*/
- REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07),
+ REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x0f),
/*
+ * 0x01, Vsmps1HwHPReq2Valid
+ * 0x02, Vsmps2HwHPReq2Valid
+ * 0x04, Vsmps3HwHPReq2Valid
* 0x08, VanaHwHPReq2Valid
+ * 0x10, VpllHwHPReq2Valid
* 0x20, Vaux1HwHPReq2Valid
* 0x40, Vaux2HwHPReq2Valid
* 0x80, Vaux3HwHPReq2Valid
*/
- REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xe8),
+ REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
/*
* 0x01, VextSupply1HwHPReq2Valid
* 0x02, VextSupply2HwHPReq2Valid
* 0x04, VextSupply3HwHPReq2Valid
+ * 0x08, VmodHwHPReq2Valid
*/
- REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07),
+ REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x0f),
/*
+ * 0x01, VapeSwHPReqValid
+ * 0x02, VarmSwHPReqValid
+ * 0x04, Vsmps1SwHPReqValid
+ * 0x08, Vsmps2SwHPReqValid
+ * 0x10, Vsmps3SwHPReqValid
* 0x20, VanaSwHPReqValid
+ * 0x40, VpllSwHPReqValid
* 0x80, Vaux1SwHPReqValid
*/
- REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xa0),
+ REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
/*
* 0x01, Vaux2SwHPReqValid
* 0x02, Vaux3SwHPReqValid
* 0x04, VextSupply1SwHPReqValid
* 0x08, VextSupply2SwHPReqValid
* 0x10, VextSupply3SwHPReqValid
+ * 0x20, VmodSwHPReqValid
*/
- REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
+ REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x3f),
/*
* 0x02, SysClkReq2Valid1
* ...
@@ -674,8 +963,28 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
/*
- * 0x0c, VanaRegu
+ * 0x03, Vsmps1Regu
+ * 0x0c, Vsmps1SelCtrl
+ * 0x10, Vsmps1AutoMode
+ * 0x20, Vsmps1PWMMode
+ */
+ REG_INIT(AB8500_VSMPS1REGU, 0x04, 0x03, 0x3f),
+ /*
+ * 0x03, Vsmps2Regu
+ * 0x0c, Vsmps2SelCtrl
+ * 0x10, Vsmps2AutoMode
+ * 0x20, Vsmps2PWMMode
+ */
+ REG_INIT(AB8500_VSMPS2REGU, 0x04, 0x04, 0x3f),
+ /*
+ * 0x03, Vsmps3Regu
+ * 0x0c, Vsmps3SelCtrl
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB8500_VSMPS3REGU, 0x04, 0x05, 0x0f),
+ /*
* 0x03, VpllRegu
+ * 0x0c, VanaRegu
*/
REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
/*
@@ -697,14 +1006,45 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_VAUX12REGU, 0x04, 0x09, 0x0f),
/*
+ * 0x0c, Vrf1Regu
* 0x03, Vaux3Regu
*/
- REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
+ REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
/*
* 0x3f, Vsmps1Sel1
*/
REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f),
/*
+ * 0x3f, Vsmps1Sel2
+ */
+ REG_INIT(AB8500_VSMPS1SEL2, 0x04, 0x14, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel3
+ */
+ REG_INIT(AB8500_VSMPS1SEL3, 0x04, 0x15, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel1
+ */
+ REG_INIT(AB8500_VSMPS2SEL1, 0x04, 0x17, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel2
+ */
+ REG_INIT(AB8500_VSMPS2SEL2, 0x04, 0x18, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel3
+ */
+ REG_INIT(AB8500_VSMPS2SEL3, 0x04, 0x19, 0x3f),
+ /*
+ * 0x7f, Vsmps3Sel1
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB8500_VSMPS3SEL1, 0x04, 0x1b, 0x7f),
+ /*
+ * 0x7f, Vsmps3Sel2
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB8500_VSMPS3SEL2, 0x04, 0x1c, 0x7f),
+ /*
* 0x0f, Vaux1Sel
*/
REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
@@ -714,13 +1054,16 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_VAUX2SEL, 0x04, 0x20, 0x0f),
/*
* 0x07, Vaux3Sel
+ * 0x30, Vrf1Sel
*/
- REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x07),
+ REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x37),
/*
* 0x01, VextSupply12LP
*/
REG_INIT(AB8500_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
/*
+ * 0x01, VpllDisch
+ * 0x02, Vrf1Disch
* 0x04, Vaux1Disch
* 0x08, Vaux2Disch
* 0x10, Vaux3Disch
@@ -728,26 +1071,36 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
* 0x40, VTVoutDisch
* 0x80, VaudioDisch
*/
- REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xfc),
+ REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xff),
/*
+ * 0x01, VsimDisch
* 0x02, VanaDisch
* 0x04, VdmicPullDownEna
+ * 0x08, VpllPullDownEna
* 0x10, VdmicDisch
*/
- REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
+ REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x1f),
};
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *pdata;
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
int i, err;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
return -EINVAL;
}
- pdata = dev_get_platdata(ab8500->dev);
+
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
if (!pdata) {
dev_err(&pdev->dev, "null pdata\n");
return -EINVAL;
@@ -760,31 +1113,24 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
}
/* initialize registers */
- for (i = 0; i < pdata->num_regulator_reg_init; i++) {
+ for (i = 0; i < pdata->num_reg_init; i++) {
int id;
- u8 value;
+ u8 mask, value;
- id = pdata->regulator_reg_init[i].id;
- value = pdata->regulator_reg_init[i].value;
+ id = pdata->reg_init[i].id;
+ mask = pdata->reg_init[i].mask;
+ value = pdata->reg_init[i].value;
/* check for configuration errors */
- if (id >= AB8500_NUM_REGULATOR_REGISTERS) {
- dev_err(&pdev->dev,
- "Configuration error: id outside range.\n");
- return -EINVAL;
- }
- if (value & ~ab8500_reg_init[id].mask) {
- dev_err(&pdev->dev,
- "Configuration error: value outside mask.\n");
- return -EINVAL;
- }
+ BUG_ON(id >= AB8500_NUM_REGULATOR_REGISTERS);
+ BUG_ON(value & ~mask);
+ BUG_ON(mask & ~ab8500_reg_init[id].mask);
/* initialize register */
err = abx500_mask_and_set_register_interruptible(&pdev->dev,
ab8500_reg_init[id].bank,
ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ mask, value);
if (err < 0) {
dev_err(&pdev->dev,
"Failed to initialize 0x%02x, 0x%02x.\n",
@@ -796,10 +1142,32 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
" init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
ab8500_reg_init[id].bank,
ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ mask, value);
}
+ /*
+ * This changes the default setting for VextSupply3Regu to low power.
+ * Whether the signal is active high or low depends on the OTP settings,
+ * which changed with AB8500 v3.0.
+ * Remove this when AB8500 v2.0 is no longer important.
+ * This only affects power consumption and depends on the HREF OTP
+ * configuration.
+ */
+ if (abx500_get_chip_id(&pdev->dev) < 0x30) {
+ err = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ AB8500_REGU_CTRL2, 0x08, 0x30, 0x30);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to override 0x%02x, 0x%02x.\n",
+ AB8500_REGU_CTRL2, 0x08);
+ return err;
+ }
+ }
+
+ /* register external regulators (before Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_init(pdev);
+ if (err)
+ return err;
+
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
@@ -844,7 +1212,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
{
- int i;
+ int i, err;
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
@@ -856,6 +1224,11 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
regulator_unregister(info->regulator);
}
+ /* remove external regulators (after Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_exit(pdev);
+ if (err)
+ return err;
+
return 0;
}
@@ -888,5 +1261,6 @@ module_exit(ab8500_regulator_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC");
MODULE_ALIAS("platform:ab8500-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 938398f3e86..41cca779b3e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -85,6 +85,7 @@ struct regulator {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
+ int use;
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
@@ -176,11 +177,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
*/
if (!regulator->min_uV && !regulator->max_uV)
continue;
-
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+
+ if (regulator->use) {
+ if (*max_uV > regulator->max_uV)
+ *max_uV = regulator->max_uV;
+ if (*min_uV < regulator->min_uV)
+ *min_uV = regulator->min_uV;
+ }
}
if (*min_uV > *max_uV)
@@ -579,6 +582,32 @@ static ssize_t regulator_suspend_standby_state_show(struct device *dev,
static DEVICE_ATTR(suspend_standby_state, 0444,
regulator_suspend_standby_state_show, NULL);
+static ssize_t regulator_use_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct regulator *reg;
+ size_t size = 0;
+
+ if (rdev->use_count == 0)
+ return sprintf(buf, "no users\n");
+
+ list_for_each_entry(reg, &rdev->consumer_list, list) {
+ if (!reg->use)
+ continue;
+
+ if (reg->dev != NULL)
+ size += sprintf((buf + size), "%s (%d) ",
+ dev_name(reg->dev), reg->use);
+ else
+ size += sprintf((buf + size), "unknown (%d) ",
+ reg->use);
+ }
+ size += sprintf((buf + size), "\n");
+
+ return size;
+}
+static DEVICE_ATTR(use, 0444, regulator_use_show, NULL);
/*
* These are the only attributes are present for all regulators.
@@ -1428,6 +1457,8 @@ int regulator_enable(struct regulator *regulator)
if (ret != 0 && rdev->supply)
regulator_disable(rdev->supply);
+ else
+ regulator->use++;
return ret;
}
@@ -1501,6 +1532,9 @@ int regulator_disable(struct regulator *regulator)
if (ret == 0 && rdev->supply)
regulator_disable(rdev->supply);
+ if (ret == 0)
+ regulator->use--;
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
@@ -2502,6 +2536,10 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
struct regulator_ops *ops = rdev->desc->ops;
int status = 0;
+ status = device_create_file(dev, &dev_attr_use);
+ if (status < 0)
+ dev_warn(dev, "Create sysfs file \"use\" failed");
+
/* some attributes need specific methods to be displayed */
if (ops->get_voltage || ops->get_voltage_sel) {
status = device_create_file(dev, &dev_attr_microvolts);
diff --git a/drivers/regulator/db5500-prcmu.c b/drivers/regulator/db5500-prcmu.c
new file mode 100644
index 00000000000..bf2aeeee399
--- /dev/null
+++ b/drivers/regulator/db5500-prcmu.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * Power domain regulators on DB5500
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/db5500-prcmu.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "dbx500-prcmu.h"
+static int db5500_regulator_enable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
+ info->desc.name);
+
+ info->is_enabled = true;
+ if (!info->exclude_from_power_state)
+ power_state_active_enable();
+
+ return 0;
+}
+
+static int db5500_regulator_disable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
+ info->desc.name);
+
+ info->is_enabled = false;
+ if (!info->exclude_from_power_state)
+ ret = power_state_active_disable();
+
+ return ret;
+}
+
+static int db5500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):"
+ " %i\n", info->desc.name, info->is_enabled);
+
+ return info->is_enabled;
+}
+
+/* db5500 regulator operations */
+static struct regulator_ops db5500_regulator_ops = {
+ .enable = db5500_regulator_enable,
+ .disable = db5500_regulator_disable,
+ .is_enabled = db5500_regulator_is_enabled,
+};
+
+/*
+ * EPOD control
+ */
+static bool epod_on[NUM_EPOD_ID];
+static bool epod_ramret[NUM_EPOD_ID];
+
+static inline int epod_id_to_index(u16 epod_id)
+{
+ return epod_id - DB5500_EPOD_ID_BASE;
+}
+
+static int enable_epod(u16 epod_id, bool ramret)
+{
+ int idx = epod_id_to_index(epod_id);
+ int ret;
+
+ if (ramret) {
+ if (!epod_on[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
+ if (ret < 0)
+ return ret;
+ }
+ epod_ramret[idx] = true;
+ } else {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_ON);
+ if (ret < 0)
+ return ret;
+ epod_on[idx] = true;
+ }
+
+ return 0;
+}
+
+static int disable_epod(u16 epod_id, bool ramret)
+{
+ int idx = epod_id_to_index(epod_id);
+ int ret;
+
+ if (ramret) {
+ if (!epod_on[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
+ if (ret < 0)
+ return ret;
+ }
+ epod_ramret[idx] = false;
+ } else {
+ if (epod_ramret[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
+ if (ret < 0)
+ return ret;
+ }
+ epod_on[idx] = false;
+ }
+
+ return 0;
+}
+
+/*
+ * Regulator switch
+ */
+static int db5500_regulator_switch_enable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
+ info->desc.name);
+
+ ret = enable_epod(info->epod_id, info->is_ramret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "regulator-switch-%s-enable: prcmu call failed\n",
+ info->desc.name);
+ goto out;
+ }
+
+ info->is_enabled = true;
+out:
+ return ret;
+}
+
+static int db5500_regulator_switch_disable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
+ info->desc.name);
+
+ ret = disable_epod(info->epod_id, info->is_ramret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "regulator_switch-%s-disable: prcmu call failed\n",
+ info->desc.name);
+ goto out;
+ }
+
+ info->is_enabled = false;
+out:
+ return ret;
+}
+
+static int db5500_regulator_switch_is_enabled(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "regulator-switch-%s-is_enabled (is_enabled): %i\n",
+ info->desc.name, info->is_enabled);
+
+ return info->is_enabled;
+}
+
+static struct regulator_ops db5500_regulator_switch_ops = {
+ .enable = db5500_regulator_switch_enable,
+ .disable = db5500_regulator_switch_disable,
+ .is_enabled = db5500_regulator_switch_is_enabled,
+};
+
+/*
+ * Regulator information
+ */
+#define DB5500_REGULATOR_SWITCH(_name, reg) \
+ [DB5500_REGULATOR_SWITCH_##reg] = { \
+ .desc = { \
+ .name = _name, \
+ .id = DB5500_REGULATOR_SWITCH_##reg, \
+ .ops = &db5500_regulator_switch_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .epod_id = DB5500_EPOD_ID_##reg, \
+}
+
+static struct dbx500_regulator_info
+ dbx500_regulator_info[DB5500_NUM_REGULATORS] = {
+ [DB5500_REGULATOR_VAPE] = {
+ .desc = {
+ .name = "db5500-vape",
+ .id = DB5500_REGULATOR_VAPE,
+ .ops = &db5500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ },
+ DB5500_REGULATOR_SWITCH("db5500-sga", SGA),
+ DB5500_REGULATOR_SWITCH("db5500-hva", HVA),
+ DB5500_REGULATOR_SWITCH("db5500-sia", SIA),
+ DB5500_REGULATOR_SWITCH("db5500-disp", DISP),
+ DB5500_REGULATOR_SWITCH("db5500-esram12", ESRAM12),
+};
+
+static int __devinit db5500_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_init_data *db5500_init_data =
+ dev_get_platdata(&pdev->dev);
+ int i, err;
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ struct regulator_init_data *init_data = &db5500_init_data[i];
+
+ /* assign per-regulator data */
+ info = &dbx500_regulator_info[i];
+ info->dev = &pdev->dev;
+
+ /* register with the regulator framework */
+ info->rdev = regulator_register(&info->desc, &pdev->dev,
+ init_data, info);
+ if (IS_ERR(info->rdev)) {
+ err = PTR_ERR(info->rdev);
+ dev_err(&pdev->dev, "failed to register %s: err %i\n",
+ info->desc.name, err);
+
+ /* if failing, unregister all earlier regulators */
+ while (--i >= 0) {
+ info = &dbx500_regulator_info[i];
+ regulator_unregister(info->rdev);
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->rdev),
+ "regulator-%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+static int __exit db5500_regulator_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ info = &dbx500_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->rdev),
+ "regulator-%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver db5500_regulator_driver = {
+ .driver = {
+ .name = "db5500-prcmu-regulators",
+ .owner = THIS_MODULE,
+ },
+ .probe = db5500_regulator_probe,
+ .remove = __exit_p(db5500_regulator_remove),
+};
+
+static int __init db5500_regulator_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&db5500_regulator_driver);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit db5500_regulator_exit(void)
+{
+ platform_driver_unregister(&db5500_regulator_driver);
+}
+
+arch_initcall(db5500_regulator_init);
+module_exit(db5500_regulator_exit);
+
+MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
+MODULE_DESCRIPTION("DB5500 regulator driver");
+MODULE_LICENSE("GPL v2");
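
A sketch of the board-support data the driver above expects (assumed, not part of this patch): one regulator_init_data entry per regulator, indexed by the DB5500_REGULATOR_* ids that the probe function iterates over, attached to a "db5500-prcmu-regulators" platform device.

#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/db5500-prcmu.h>

static struct regulator_init_data db5500_regulators[DB5500_NUM_REGULATORS] = {
	[DB5500_REGULATOR_VAPE] = {
		.constraints = {
			.name = "db5500-vape",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	/* the switches (SGA, HVA, SIA, DISP, ESRAM12) are filled in the same way */
};

static struct platform_device db5500_prcmu_regulator_dev = {
	.name = "db5500-prcmu-regulators",
	.id = -1,
	.dev = {
		.platform_data = db5500_regulators,
	},
};

/* board init would call platform_device_register(&db5500_prcmu_regulator_dev); */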
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 78329751af5..8076739ea12 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -18,74 +18,11 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/module.h>
-
-/*
- * power state reference count
- */
-static int power_state_active_cnt; /* will initialize to zero */
-static DEFINE_SPINLOCK(power_state_active_lock);
-
-static void power_state_active_enable(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&power_state_active_lock, flags);
- power_state_active_cnt++;
- spin_unlock_irqrestore(&power_state_active_lock, flags);
-}
-
-static int power_state_active_disable(void)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&power_state_active_lock, flags);
- if (power_state_active_cnt <= 0) {
- pr_err("power state: unbalanced enable/disable calls\n");
- ret = -EINVAL;
- goto out;
- }
-
- power_state_active_cnt--;
-out:
- spin_unlock_irqrestore(&power_state_active_lock, flags);
- return ret;
-}
-
-/*
- * Exported interface for CPUIdle only. This function is called when interrupts
- * are turned off. Hence, no locking.
- */
-int power_state_active_is_enabled(void)
-{
- return (power_state_active_cnt > 0);
-}
-
-/**
- * struct db8500_regulator_info - db8500 regulator information
- * @dev: device pointer
- * @desc: regulator description
- * @rdev: regulator device pointer
- * @is_enabled: status of the regulator
- * @epod_id: id for EPOD (power domain)
- * @is_ramret: RAM retention switch for EPOD (power domain)
- * @operating_point: operating point (only for vape, to be removed)
- *
- */
-struct db8500_regulator_info {
- struct device *dev;
- struct regulator_desc desc;
- struct regulator_dev *rdev;
- bool is_enabled;
- u16 epod_id;
- bool is_ramret;
- bool exclude_from_power_state;
- unsigned int operating_point;
-};
+#include "dbx500-prcmu.h"
static int db8500_regulator_enable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL)
return -EINVAL;
@@ -102,7 +39,7 @@ static int db8500_regulator_enable(struct regulator_dev *rdev)
static int db8500_regulator_disable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
int ret = 0;
if (info == NULL)
@@ -120,7 +57,7 @@ static int db8500_regulator_disable(struct regulator_dev *rdev)
static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL)
return -EINVAL;
@@ -197,7 +134,7 @@ static int disable_epod(u16 epod_id, bool ramret)
*/
static int db8500_regulator_switch_enable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
if (info == NULL)
@@ -221,7 +158,7 @@ out:
static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
if (info == NULL)
@@ -245,7 +182,7 @@ out:
static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL)
return -EINVAL;
@@ -266,8 +203,8 @@ static struct regulator_ops db8500_regulator_switch_ops = {
/*
* Regulator information
*/
-static struct db8500_regulator_info
-db8500_regulator_info[DB8500_NUM_REGULATORS] = {
+static struct dbx500_regulator_info
+dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
@@ -476,12 +413,12 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
int i, err;
/* register all regulators */
- for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
- struct db8500_regulator_info *info;
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
struct regulator_init_data *init_data = &db8500_init_data[i];
/* assign per-regulator data */
- info = &db8500_regulator_info[i];
+ info = &dbx500_regulator_info[i];
info->dev = &pdev->dev;
/* register with the regulator framework */
@@ -494,7 +431,7 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
/* if failing, unregister all earlier regulators */
while (--i >= 0) {
- info = &db8500_regulator_info[i];
+ info = &dbx500_regulator_info[i];
regulator_unregister(info->rdev);
}
return err;
@@ -503,17 +440,22 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
dev_dbg(rdev_get_dev(info->rdev),
"regulator-%s-probed\n", info->desc.name);
}
+ err = ux500_regulator_debug_init(pdev,
+ dbx500_regulator_info,
+ ARRAY_SIZE(dbx500_regulator_info));
- return 0;
+ return err;
}
static int __exit db8500_regulator_remove(struct platform_device *pdev)
{
int i;
- for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
- struct db8500_regulator_info *info;
- info = &db8500_regulator_info[i];
+ ux500_regulator_debug_exit();
+
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ info = &dbx500_regulator_info[i];
dev_vdbg(rdev_get_dev(info->rdev),
"regulator-%s-remove\n", info->desc.name);
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
new file mode 100644
index 00000000000..6b1b2a4431b
--- /dev/null
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * UX500 common part of Power domain regulators
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "dbx500-prcmu.h"
+
+/*
+ * power state reference count
+ */
+static int power_state_active_cnt; /* will initialize to zero */
+static DEFINE_SPINLOCK(power_state_active_lock);
+
+void power_state_active_enable(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_state_active_lock, flags);
+ power_state_active_cnt++;
+ spin_unlock_irqrestore(&power_state_active_lock, flags);
+}
+
+int power_state_active_disable(void)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_state_active_lock, flags);
+ if (power_state_active_cnt <= 0) {
+ pr_err("power state: unbalanced enable/disable calls\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ power_state_active_cnt--;
+out:
+ spin_unlock_irqrestore(&power_state_active_lock, flags);
+ return ret;
+}
+
+/*
+ * Exported interface for CPUIdle only. This function is called when interrupts
+ * are turned off. Hence, no locking.
+ */
+int power_state_active_is_enabled(void)
+{
+ return (power_state_active_cnt > 0);
+}
+
+struct ux500_regulator {
+ char *name;
+ void (*enable)(void);
+ int (*disable)(void);
+};
+static struct ux500_regulator ux500_atomic_regulators[] = {
+ {
+ .name = "dma40.0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi2",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi3",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "cryp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "hash1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+};
+
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) {
+ if (!strcmp(dev_name(dev), ux500_atomic_regulators[i].name))
+ return &ux500_atomic_regulators[i];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_get);
+
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ if (regulator) {
+ regulator->enable();
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_atomic_enable);
+
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ if (regulator)
+ return regulator->disable();
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_atomic_disable);
+
+void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+ /* Here for symmetry and for possible future use */
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_put);
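
A sketch of how a client could use the atomic interface above from a context where the ordinary regulator API cannot sleep; the lookup matches dev_name(), so only devices listed in ux500_atomic_regulators[] succeed. The "ssp0" device and the platform header carrying the prototypes are assumptions for illustration.

#include <linux/device.h>
#include <linux/err.h>
/* prototypes for ux500_regulator_get() and friends assumed from a platform header */

static struct ux500_regulator *ssp_reg;

static int example_claim(struct device *dev)	/* dev_name(dev) == "ssp0" assumed */
{
	ssp_reg = ux500_regulator_get(dev);
	if (IS_ERR(ssp_reg))
		return PTR_ERR(ssp_reg);
	return 0;
}

static void example_transfer(void)
{
	ux500_regulator_atomic_enable(ssp_reg);		/* bumps the APE power state count */
	/* ... program the hardware ... */
	ux500_regulator_atomic_disable(ssp_reg);	/* drops it again */
}

static void example_release(void)
{
	ux500_regulator_put(ssp_reg);
}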
+
+#ifdef CONFIG_REGULATOR_DEBUG
+
+static struct ux500_regulator_debug {
+ struct dentry *dir;
+ struct dentry *status_file;
+ struct dbx500_regulator_info *regulator_array;
+ int num_regulators;
+ u8 *state_before_suspend;
+ u8 *state_after_suspend;
+} rdebug;
+
+void ux500_regulator_suspend_debug(void)
+{
+ int i;
+ for (i = 0; i < rdebug.num_regulators; i++)
+ rdebug.state_before_suspend[i] =
+ rdebug.regulator_array[i].is_enabled;
+}
+
+void ux500_regulator_resume_debug(void)
+{
+ int i;
+ for (i = 0; i < rdebug.num_regulators; i++)
+ rdebug.state_after_suspend[i] =
+ rdebug.regulator_array[i].is_enabled;
+}
+
+static int ux500_regulator_status_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int err;
+ int i;
+
+ /* print dump header */
+ err = seq_printf(s, "ux500-regulator status:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ err = seq_printf(s, "%31s : %8s : %8s\n", "current",
+ "before", "after");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ for (i = 0; i < rdebug.num_regulators; i++) {
+ struct dbx500_regulator_info *info;
+ /* Access per-regulator data */
+ info = &rdebug.regulator_array[i];
+
+ /* print status */
+ err = seq_printf(s, "%20s : %8s : %8s : %8s\n", info->desc.name,
+ info->is_enabled ? "enabled" : "disabled",
+ rdebug.state_before_suspend[i] ? "enabled" : "disabled",
+ rdebug.state_after_suspend[i] ? "enabled" : "disabled");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+ }
+
+ return 0;
+}
+
+static int ux500_regulator_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ux500_regulator_status_print,
+ inode->i_private);
+}
+
+static const struct file_operations ux500_regulator_status_fops = {
+ .open = ux500_regulator_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+int __devinit
+ux500_regulator_debug_init(struct platform_device *pdev,
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators)
+{
+ /* create directory */
+ rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
+ if (!rdebug.dir)
+ goto exit_no_debugfs;
+
+ /* create "status" file */
+ rdebug.status_file = debugfs_create_file("status",
+ S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_status_fops);
+ if (!rdebug.status_file)
+ goto exit_destroy_dir;
+
+ rdebug.regulator_array = regulator_info;
+ rdebug.num_regulators = num_regulators;
+
+ rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL);
+ if (!rdebug.state_before_suspend) {
+ dev_err(&pdev->dev,
+ "could not allocate memory for saving state\n");
+ goto exit_destroy_status;
+ }
+
+ rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
+ if (!rdebug.state_after_suspend) {
+ dev_err(&pdev->dev,
+ "could not allocate memory for saving state\n");
+ goto exit_free;
+ }
+ return 0;
+
+exit_free:
+ kfree(rdebug.state_before_suspend);
+exit_destroy_status:
+ debugfs_remove(rdebug.status_file);
+exit_destroy_dir:
+ debugfs_remove(rdebug.dir);
+exit_no_debugfs:
+ dev_err(&pdev->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
+}
+
+int __devexit ux500_regulator_debug_exit(void)
+{
+ debugfs_remove_recursive(rdebug.dir);
+ kfree(rdebug.state_after_suspend);
+ kfree(rdebug.state_before_suspend);
+
+ return 0;
+}
+#endif
diff --git a/drivers/regulator/dbx500-prcmu.h b/drivers/regulator/dbx500-prcmu.h
new file mode 100644
index 00000000000..f7e20fe075a
--- /dev/null
+++ b/drivers/regulator/dbx500-prcmu.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Bengt Jonsson <bengt.jonsson@stericsson.com> for ST-Ericsson,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+
+#ifndef DBX500_REGULATOR_H
+#define DBX500_REGULATOR_H
+
+#include <linux/platform_device.h>
+
+/**
+ * struct dbx500_regulator_info - dbx500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @rdev: regulator device pointer
+ * @is_enabled: status of the regulator
+ * @epod_id: id for EPOD (power domain)
+ * @is_ramret: RAM retention switch for EPOD (power domain)
+ * @operating_point: operating point (only for vape, to be removed)
+ *
+ */
+struct dbx500_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ bool is_enabled;
+ u16 epod_id;
+ bool is_ramret;
+ bool exclude_from_power_state;
+ unsigned int operating_point;
+};
+
+void power_state_active_enable(void);
+int power_state_active_disable(void);
+
+#ifdef CONFIG_REGULATOR_DEBUG
+int ux500_regulator_debug_init(struct platform_device *pdev,
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators);
+
+int ux500_regulator_debug_exit(void);
+#else
+
+static inline int ux500_regulator_debug_init(struct platform_device *pdev,
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators) { return 0; }
+
+static inline int ux500_regulator_debug_exit(void) { return 0; }
+#endif
+#endif
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 53eb4e55b28..17cd5c2932b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -697,6 +697,13 @@ config RTC_DRV_PCF50633
If you say yes here you get support for the RTC subsystem of the
NXP PCF50633 used in embedded systems.
+config RTC_DRV_AB
+ tristate "ST-Ericsson AB5500 RTC"
+ depends on AB5500_CORE
+ help
+ Select this to enable the ST-Ericsson AB5500 Mixed Signal IC RTC
+ support. This chip contains a battery- and capacitor-backed RTC.
+
config RTC_DRV_AB3100
tristate "ST-Ericsson AB3100 RTC"
depends on AB3100_CORE
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 6e6982335c1..a69992dd1cb 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -16,6 +16,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
+obj-$(CONFIG_RTC_DRV_AB) += rtc-ab.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
diff --git a/drivers/rtc/rtc-ab.c b/drivers/rtc/rtc-ab.c
new file mode 100644
index 00000000000..db1992632fa
--- /dev/null
+++ b/drivers/rtc/rtc-ab.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+#define AB5500_RTC_CLOCK_RATE 32768
+#define AB5500_RTC 0x00
+#define AB5500_RTC_ALARM (1 << 1)
+#define AB5500_READREQ 0x01
+#define AB5500_READREQ_REQ 0x01
+#define AB5500_AL0 0x02
+#define AB5500_TI0 0x06
+
+/**
+ * struct ab_rtc - variant specific data
+ * @irqname: optional name for the alarm interrupt resource
+ * @epoch: epoch to adjust year to
+ * @bank: AB bank where this block is present
+ * @rtc: address of the "RTC" (control) register
+ * @rtc_alarmon: mask of the alarm enable bit in the above register
+ * @ti0: address of the TI0 register. The rest of the TI
+ * registers are assumed to contiguously follow this one.
+ * @nr_ti: number of TI* registers
+ * @al0: address of the AL0 register. The rest of the
+ * AL registers are assumed to contiguously follow this one.
+ * @nr_al: number of AL* registers
+ * @startup: optional function to initialize the RTC
+ * @alarm_to_regs: function to convert alarm time in seconds
+ * to a list of AL register values
+ * @time_to_regs: function to convert alarm time in seconds
+ * to a list of TI register values
+ * @regs_to_alarm: function to convert a list of AL register
+ * values to the alarm time in seconds
+ * @regs_to_time: function to convert a list of TI register
+ * values to the alarm time in seconds
+ * @request_read: optional function to request a read from the TI* registers
+ * @request_write: optional function to request a write to the TI* registers
+ */
+struct ab_rtc {
+ const char *irqname;
+ unsigned int epoch;
+
+ u8 bank;
+ u8 rtc;
+ u8 rtc_alarmon;
+ u8 ti0;
+ int nr_ti;
+ u8 al0;
+ int nr_al;
+
+ int (*startup)(struct device *dev);
+ void (*alarm_to_regs)(struct device *dev, unsigned long secs, u8 *regs);
+ void (*time_to_regs)(struct device *dev, unsigned long secs, u8 *regs);
+ unsigned long (*regs_to_alarm)(struct device *dev, u8 *regs);
+ unsigned long (*regs_to_time)(struct device *dev, u8 *regs);
+ int (*request_read)(struct device *dev);
+ int (*request_write)(struct device *dev);
+};
+
+static const struct ab_rtc *to_ab_rtc(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ return (struct ab_rtc *)pdev->id_entry->driver_data;
+}
+
+/* Calculate the number of seconds since year, for epoch adjustment */
+static unsigned long ab_rtc_get_elapsed_seconds(unsigned int year)
+{
+ unsigned long secs;
+ struct rtc_time tm = {
+ .tm_year = year - 1900,
+ .tm_mday = 1,
+ };
+
+ rtc_tm_to_time(&tm, &secs);
+
+ return secs;
+}
+
+static int ab5500_rtc_request_read(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned long timeout;
+ int err;
+
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ AB5500_READREQ,
+ AB5500_READREQ_REQ);
+ if (err < 0)
+ return err;
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout)) {
+ u8 value;
+
+ err = abx500_get_register_interruptible(dev, variant->bank,
+ AB5500_READREQ, &value);
+ if (err < 0)
+ return err;
+
+ if (!(value & AB5500_READREQ_REQ))
+ return 0;
+
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static void
+ab5500_rtc_time_to_regs(struct device *dev, unsigned long secs, u8 *regs)
+{
+ unsigned long mins = secs / 60;
+ u64 fat_time;
+
+ secs %= 60;
+
+ fat_time = secs * AB5500_RTC_CLOCK_RATE;
+ fat_time |= (u64)mins << 21;
+
+ regs[0] = (fat_time) & 0xFF;
+ regs[1] = (fat_time >> 8) & 0xFF;
+ regs[2] = (fat_time >> 16) & 0xFF;
+ regs[3] = (fat_time >> 24) & 0xFF;
+ regs[4] = (fat_time >> 32) & 0xFF;
+ regs[5] = (fat_time >> 40) & 0xFF;
+}
+
+static unsigned long
+ab5500_rtc_regs_to_time(struct device *dev, u8 *regs)
+{
+ u64 fat_time = ((u64)regs[5] << 40) | ((u64)regs[4] << 32) |
+ ((u64)regs[3] << 24) | ((u64)regs[2] << 16) |
+ ((u64)regs[1] << 8) | regs[0];
+ unsigned long secs = (fat_time & 0x1fffff) / AB5500_RTC_CLOCK_RATE;
+ unsigned long mins = fat_time >> 21;
+
+ return mins * 60 + secs;
+}
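
A standalone worked example of the packed "fat time" format used by the two helpers above: bits 0-20 carry seconds scaled by the 32768 Hz clock, bits 21 and up carry whole minutes. For illustration only.

#include <stdio.h>
#include <stdint.h>

#define CLOCK_RATE 32768ULL

int main(void)
{
	unsigned long in = 3661;	/* 1 h, 1 min, 1 s */
	uint64_t fat = (in % 60) * CLOCK_RATE | ((uint64_t)(in / 60) << 21);
	unsigned long out = (unsigned long)((fat >> 21) * 60 +
					    (fat & 0x1fffff) / CLOCK_RATE);

	printf("fat=0x%llx decoded=%lu s\n", (unsigned long long)fat, out);
	return 0;
}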
+
+static void
+ab5500_rtc_alarm_to_regs(struct device *dev, unsigned long secs, u8 *regs)
+{
+ unsigned long mins = secs / 60;
+
+#ifdef CONFIG_ANDROID
+ /*
+ * Needed because Android believes all hw have a wake-up resolution in
+ * seconds.
+ */
+ mins++;
+#endif
+
+ regs[0] = mins & 0xFF;
+ regs[1] = (mins >> 8) & 0xFF;
+ regs[2] = (mins >> 16) & 0xFF;
+}
+
+static unsigned long
+ab5500_rtc_regs_to_alarm(struct device *dev, u8 *regs)
+{
+ unsigned long mins = ((unsigned long)regs[2] << 16) |
+ ((unsigned long)regs[1] << 8) |
+ regs[0];
+ unsigned long secs = mins * 60;
+
+ return secs;
+}
+
+static const struct ab_rtc ab5500_rtc = {
+ .irqname = "RTC_Alarm",
+ .bank = AB5500_BANK_RTC,
+ .rtc = AB5500_RTC,
+ .rtc_alarmon = AB5500_RTC_ALARM,
+ .ti0 = AB5500_TI0,
+ .nr_ti = 6,
+ .al0 = AB5500_AL0,
+ .nr_al = 3,
+ .epoch = 2000,
+ .time_to_regs = ab5500_rtc_time_to_regs,
+ .regs_to_time = ab5500_rtc_regs_to_time,
+ .alarm_to_regs = ab5500_rtc_alarm_to_regs,
+ .regs_to_alarm = ab5500_rtc_regs_to_alarm,
+ .request_read = ab5500_rtc_request_read,
+};
+
+static int ab_rtc_request_read(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->request_read)
+ return 0;
+
+ return variant->request_read(dev);
+}
+
+static int ab_rtc_request_write(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->request_write)
+ return 0;
+
+ return variant->request_write(dev);
+}
+
+static bool ab_rtc_valid_time(struct device *dev, struct rtc_time *time)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->epoch)
+ return true;
+
+ return time->tm_year >= variant->epoch - 1900;
+}
+
+static int
+ab_rtc_tm_to_time(struct device *dev, struct rtc_time *tm, unsigned long *secs)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ rtc_tm_to_time(tm, secs);
+
+ if (variant->epoch)
+ *secs -= ab_rtc_get_elapsed_seconds(variant->epoch);
+
+ return 0;
+}
+
+static int
+ab_rtc_time_to_tm(struct device *dev, unsigned long secs, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (variant->epoch)
+ secs += ab_rtc_get_elapsed_seconds(variant->epoch);
+
+ rtc_time_to_tm(secs, tm);
+
+ return 0;
+}
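
A small standalone illustration of the epoch adjustment done by the two conversion helpers above: with .epoch = 2000 the hardware counts seconds from 2000-01-01 rather than from the Unix epoch. The sample time is arbitrary.

#include <stdio.h>

#define SECS_1970_TO_2000 946684800UL	/* 30 years including 7 leap days */

int main(void)
{
	unsigned long unix_secs = 1300000000UL;		/* some time in 2011 */
	unsigned long hw_secs = unix_secs - SECS_1970_TO_2000;

	printf("hardware counts %lu s since 2000\n", hw_secs);
	printf("round trip back to Unix time: %lu\n", hw_secs + SECS_1970_TO_2000);
	return 0;
}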
+
+static int ab_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_ti];
+ unsigned long secs;
+ int err;
+
+ err = ab_rtc_request_read(dev);
+ if (err)
+ return err;
+
+ err = abx500_get_register_page_interruptible(dev, variant->bank,
+ variant->ti0,
+ buf, variant->nr_ti);
+ if (err)
+ return err;
+
+ secs = variant->regs_to_time(dev, buf);
+ ab_rtc_time_to_tm(dev, secs, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int ab_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_ti];
+ unsigned long secs;
+ u8 reg = variant->ti0;
+ int err;
+ int i;
+
+ if (!ab_rtc_valid_time(dev, tm))
+ return -EINVAL;
+
+ ab_rtc_tm_to_time(dev, tm, &secs);
+ variant->time_to_regs(dev, secs, buf);
+
+ for (i = 0; i < variant->nr_ti; i++, reg++) {
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ reg, buf[i]);
+ if (err)
+ return err;
+ }
+
+ return ab_rtc_request_write(dev);
+}
+
+static int ab_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned long secs;
+ u8 buf[variant->nr_al];
+ u8 rtcval;
+ int err;
+
+ err = abx500_get_register_interruptible(dev, variant->bank,
+ variant->rtc, &rtcval);
+ if (err)
+ return err;
+
+ alarm->enabled = !!(rtcval & variant->rtc_alarmon);
+ alarm->pending = 0;
+
+ err = abx500_get_register_page_interruptible(dev, variant->bank,
+ variant->al0, buf,
+ variant->nr_al);
+ if (err)
+ return err;
+
+ secs = variant->regs_to_alarm(dev, buf);
+ ab_rtc_time_to_tm(dev, secs, &alarm->time);
+
+ return rtc_valid_tm(&alarm->time);
+}
+
+static int ab_rtc_alarm_enable(struct device *dev, unsigned int enabled)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ u8 mask = variant->rtc_alarmon;
+ u8 value = enabled ? mask : 0;
+
+ return abx500_mask_and_set_register_interruptible(dev, variant->bank,
+ variant->rtc, mask,
+ value);
+}
+
+static int ab_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_al];
+ unsigned long secs;
+ u8 reg = variant->al0;
+ int err;
+ int i;
+
+ if (!ab_rtc_valid_time(dev, &alarm->time))
+ return -EINVAL;
+
+ ab_rtc_tm_to_time(dev, &alarm->time, &secs);
+ variant->alarm_to_regs(dev, secs, buf);
+
+ /*
+ * Disable alarm first. Otherwise the RTC may not detect an alarm
+ * reprogrammed for the same time without disabling the alarm in
+ * between the programmings.
+ */
+ err = ab_rtc_alarm_enable(dev, false);
+ if (err)
+ return err;
+
+ for (i = 0; i < variant->nr_al; i++, reg++) {
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ reg, buf[i]);
+ if (err)
+ return err;
+ }
+
+ return ab_rtc_alarm_enable(dev, true);
+}
+
+static const struct rtc_class_ops ab_rtc_ops = {
+ .read_time = ab_rtc_read_time,
+ .set_time = ab_rtc_set_time,
+ .read_alarm = ab_rtc_read_alarm,
+ .set_alarm = ab_rtc_set_alarm,
+ .alarm_irq_enable = ab_rtc_alarm_enable,
+};
+
+static irqreturn_t ab_rtc_irq(int irq, void *dev_id)
+{
+ unsigned long events = RTC_IRQF | RTC_AF;
+ struct rtc_device *rtc = dev_id;
+
+ rtc_update_irq(rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ab_rtc_probe(struct platform_device *pdev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(&pdev->dev);
+ int err;
+ struct rtc_device *rtc;
+ int irq = -ENXIO;
+
+ if (variant->irqname) {
+ irq = platform_get_irq_byname(pdev, variant->irqname);
+ if (irq < 0)
+ return irq;
+ }
+
+ if (variant->startup) {
+ err = variant->startup(&pdev->dev);
+ if (err)
+ return err;
+ }
+
+ rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "Registration failed\n");
+ err = PTR_ERR(rtc);
+ return err;
+ }
+
+ if (irq >= 0) {
+ err = request_any_context_irq(irq, ab_rtc_irq,
+ IRQF_NO_SUSPEND,
+ pdev->id_entry->name,
+ rtc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "could not get irq: %d\n", err);
+ goto out_unregister;
+ }
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ return 0;
+
+out_unregister:
+ rtc_device_unregister(rtc);
+ return err;
+}
+
+static int __devexit ab_rtc_remove(struct platform_device *pdev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(&pdev->dev);
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, variant->irqname);
+
+ if (irq >= 0)
+ free_irq(irq, rtc);
+ rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id ab_rtc_id_table[] = {
+ { "ab5500-rtc", (kernel_ulong_t)&ab5500_rtc, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, ab_rtc_id_table);
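
A hypothetical example of how a further AB variant would plug into this driver: describe its register layout in a struct ab_rtc and add one platform_device_id entry. "ab5501" and its register values are invented here purely for illustration.

static const struct ab_rtc ab5501_rtc = {
	.irqname	= "RTC_Alarm",
	.bank		= 0x0F,			/* assumed bank for illustration */
	.rtc		= 0x00,
	.rtc_alarmon	= 1 << 1,
	.ti0		= 0x06,
	.nr_ti		= 6,
	.al0		= 0x02,
	.nr_al		= 3,
	.epoch		= 2000,
	.time_to_regs	= ab5500_rtc_time_to_regs,	/* reused if the layout matches */
	.regs_to_time	= ab5500_rtc_regs_to_time,
	.alarm_to_regs	= ab5500_rtc_alarm_to_regs,
	.regs_to_alarm	= ab5500_rtc_regs_to_alarm,
};

/* and one more id-table entry before the terminator:
 *	{ "ab5501-rtc", (kernel_ulong_t)&ab5501_rtc },
 */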
+
+static struct platform_driver ab_rtc_driver = {
+ .driver.name = "ab-rtc",
+ .driver.owner = THIS_MODULE,
+ .id_table = ab_rtc_id_table,
+ .probe = ab_rtc_probe,
+ .remove = __devexit_p(ab_rtc_remove),
+};
+
+static int __init ab_rtc_init(void)
+{
+ return platform_driver_register(&ab_rtc_driver);
+}
+module_init(ab_rtc_init);
+
+static void __exit ab_rtc_exit(void)
+{
+ platform_driver_unregister(&ab_rtc_driver);
+}
+module_exit(ab_rtc_exit);
+
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_DESCRIPTION("AB5500 RTC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index e346705aae9..3160d9b5613 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -90,7 +90,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* Early AB8500 chips will not clear the rtc read request bit */
if (abx500_get_chip_id(dev) == 0) {
- msleep(1);
+ mdelay(1);
} else {
/* Wait for some cycles after enabling the rtc read in ab8500 */
while (time_before(jiffies, timeout)) {
@@ -102,7 +102,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (!(value & RTC_READ_REQUEST))
break;
- msleep(1);
+ mdelay(1);
}
}
@@ -241,8 +241,19 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
*/
secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+#ifndef CONFIG_ANDROID
+ secs += 30; /* Round to nearest minute */
+#endif
+
mins = secs / 60;
+#ifdef CONFIG_ANDROID
+ /*
+ * Needed because Android believes all hw have a wake-up resolution
+ * in seconds.
+ */
+ mins++;
+#endif
buf[2] = mins & 0xFF;
buf[1] = (mins >> 8) & 0xFF;
buf[0] = (mins >> 16) & 0xFF;
@@ -258,6 +269,106 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return ab8500_rtc_irq_enable(dev, alarm->enabled);
}
+
+static int ab8500_rtc_set_calibration(struct device *dev, int calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ /*
+ * Check that the calibration value (which is in units of 0.5 parts-per-million)
+ * is in the AB8500's range for RtcCalibration register.
+ */
+ if ((calibration < -127) || (calibration > 127)) {
+ dev_err(dev, "RtcCalibration value outside permitted range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The AB8500 uses sign (in bit 7) and magnitude (in bits 0-6),
+ * so need to convert to this sort of representation before writing
+ * into RtcCalibration register...
+ */
+ if (calibration >= 0)
+ rtccal = 0x7F & calibration;
+ else
+ rtccal = ~(calibration - 1) | 0x80;
+
+ retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, rtccal);
+
+ return retval;
+}
+
+static int ab8500_rtc_get_calibration(struct device *dev, int *calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ retval = abx500_get_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, &rtccal);
+ if (retval >= 0) {
+ /*
+ * The AB8500 uses sign (in bit 7) and magnitude (in bits 0-6),
+ * so need to convert value from RtcCalibration register into
+ * a two's complement signed value...
+ */
+ if (rtccal & 0x80)
+ *calibration = 0 - (rtccal & 0x7F);
+ else
+ *calibration = 0x7F & rtccal;
+ }
+
+ return retval;
+}
+
+static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval;
+ int calibration = 0;
+
+ if (sscanf(buf, " %i ", &calibration) != 1) {
+ dev_err(dev, "Failed to store RTC calibration attribute\n");
+ return -EINVAL;
+ }
+
+ retval = ab8500_rtc_set_calibration(dev, calibration);
+
+ return retval ? retval : count;
+}
+
+static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval = 0;
+ int calibration = 0;
+
+ retval = ab8500_rtc_get_calibration(dev, &calibration);
+ if (retval < 0) {
+ dev_err(dev, "Failed to read RTC calibration attribute\n");
+ sprintf(buf, "0\n");
+ return retval;
+ }
+
+ return sprintf(buf, "%d\n", calibration);
+}
+
+static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR,
+ ab8500_sysfs_show_rtc_calibration,
+ ab8500_sysfs_store_rtc_calibration);
+
+static int ab8500_sysfs_rtc_register(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_rtc_calibration);
+}
+
+static void ab8500_sysfs_rtc_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_rtc_calibration);
+}
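
A standalone illustration of the sign/magnitude encoding used by the calibration helpers above: bit 7 carries the sign, bits 0-6 the magnitude, each step being 0.5 ppm. For illustration only; the driver performs the same conversion when the rtc_calibration attribute is read or written.

#include <stdio.h>
#include <stdint.h>

static uint8_t to_reg(int cal)
{
	return cal >= 0 ? (uint8_t)(cal & 0x7F) : (uint8_t)((-cal & 0x7F) | 0x80);
}

static int from_reg(uint8_t reg)
{
	return (reg & 0x80) ? -(reg & 0x7F) : (reg & 0x7F);
}

int main(void)
{
	int vals[] = { 0, 1, -1, 127, -127 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%4d -> 0x%02x -> %4d\n", vals[i],
		       to_reg(vals[i]), from_reg(to_reg(vals[i])));
	return 0;
}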
+
static irqreturn_t rtc_alarm_handler(int irq, void *data)
{
struct rtc_device *rtc = data;
@@ -295,7 +406,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
/* Wait for reset by the PorRtc */
- msleep(1);
+ mdelay(1);
err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC,
AB8500_RTC_STAT_REG, &rtc_ctrl);
@@ -308,6 +419,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ device_init_wakeup(&pdev->dev, true);
+
rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -316,8 +429,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
}
- err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
- "ab8500-rtc", rtc);
+ err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
+ IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -325,6 +438,13 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+
+ err = ab8500_sysfs_rtc_register(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "sysfs RTC failed to register\n");
+ return err;
+ }
+
return 0;
}
@@ -333,6 +453,8 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ALARM");
+ ab8500_sysfs_rtc_unregister(&pdev->dev);
+
free_irq(irq, rtc);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5559b229919..b2417604707 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -512,6 +512,11 @@ static void giveback(struct pl022 *pl022)
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
+
+ /* disable the SPI/SSP operation */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
/* This message is completed, so let's turn off the clocks & power */
pm_runtime_put(&pl022->adev->dev);
}
@@ -918,6 +923,12 @@ static int configure_dma(struct pl022 *pl022)
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
+ /* DMA burstsize should be same as the FIFO trigger level */
+ rx_conf.src_maxburst = pl022->rx_lev_trig ? 1 <<
+ (pl022->rx_lev_trig + 1) : pl022->rx_lev_trig;
+ tx_conf.dst_maxburst = pl022->tx_lev_trig ? 1 <<
+ (pl022->tx_lev_trig + 1) : pl022->tx_lev_trig;
+
/* Check that the channels are available */
if (!rxchan || !txchan)
return -ENODEV;
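
A standalone illustration of the burst-size expression added above: a FIFO trigger level of 0 (single element) keeps maxburst at 0 (no bursting), while higher levels map to 4, 8, 16, 32 element bursts.

#include <stdio.h>

static unsigned int lev_trig_to_maxburst(unsigned int lev_trig)
{
	/* same expression as in configure_dma() above */
	return lev_trig ? 1U << (lev_trig + 1) : lev_trig;
}

int main(void)
{
	unsigned int lev;

	for (lev = 0; lev <= 4; lev++)
		printf("lev_trig %u -> maxburst %u\n",
		       lev, lev_trig_to_maxburst(lev));
	return 0;
}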
@@ -1881,7 +1892,7 @@ static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
- struct ssp_clock_params clk_freq = {0, };
+ struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0 };
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
unsigned int bits = spi->bits_per_word;
@@ -2171,6 +2182,9 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
pl022->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
status = PTR_ERR(pl022->clk);
@@ -2284,6 +2298,7 @@ pl022_remove(struct amba_device *adev)
clk_disable(pl022->clk);
clk_unprepare(pl022->clk);
clk_put(pl022->clk);
+ pm_runtime_disable(&adev->dev);
iounmap(pl022->virtbase);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
@@ -2305,11 +2320,6 @@ static int pl022_suspend(struct device *dev)
return status;
}
- amba_vcore_enable(pl022->adev);
- amba_pclk_enable(pl022->adev);
- load_ssp_default_config(pl022);
- amba_pclk_disable(pl022->adev);
- amba_vcore_disable(pl022->adev);
dev_dbg(dev, "suspended\n");
return 0;
}
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 11728a03f8a..e8afb75590a 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -5,7 +5,7 @@
*
* Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Copyright 2010 (c) ST-Ericsson AB
+ * Copyright 2010 (c) ST-Ericsson SA
*/
/*
* This file is licensed under the GPL2 license.
@@ -27,6 +27,7 @@
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
@@ -36,8 +37,10 @@
/* TODO: for multiple device support will need a per-device mutex */
#define DRIVER_NAME "synaptics_rmi4_i2c"
+#define DELTA 8
#define MAX_ERROR_REPORT 6
-#define MAX_TOUCH_MAJOR 15
+#define TIMEOUT_PERIOD 1
+#define MAX_WIDTH_MAJOR 255
#define MAX_RETRY_COUNT 5
#define STD_QUERY_LEN 21
#define PAGE_LEN 2
@@ -45,6 +48,7 @@
#define BUF_LEN 37
#define QUERY_LEN 9
#define DATA_LEN 12
+#define RESUME_DELAY 100 /* msecs */
#define HAS_TAP 0x01
#define HAS_PALMDETECT 0x01
#define HAS_ROTATE 0x02
@@ -164,6 +168,8 @@ struct synaptics_rmi4_device_info {
* @regulator: pointer to the regulator structure
* @wait: wait queue structure variable
* @touch_stopped: flag to stop the thread function
+ * @enable: flag to enable/disable the driver event.
+ * @resume_wq_handler: work used to resume the device
*
* This structure gives the device data information.
*/
@@ -184,6 +190,8 @@ struct synaptics_rmi4_data {
struct regulator *regulator;
wait_queue_head_t wait;
bool touch_stopped;
+ bool enable;
+ struct work_struct resume_wq_handler;
};
/**
@@ -291,6 +299,133 @@ exit:
}
/**
+ * synaptics_rmi4_enable() - enable the touchpad driver event
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function enables the touchpad driver events; it returns 0 on success
+ * or a negative error code.
+ */
+static int synaptics_rmi4_enable(struct synaptics_rmi4_data *pdata)
+{
+ int retval;
+ unsigned char intr_status;
+
+ if (pdata->board->regulator_en)
+ regulator_enable(pdata->regulator);
+ enable_irq(pdata->board->irq_number);
+ pdata->touch_stopped = false;
+
+ msleep(RESUME_DELAY);
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ &intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ pdata->fn01_ctrl_base_addr + 1,
+ (intr_status | TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+/**
+ * synaptics_rmi4_disable() - disable the touchpad driver event
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function disables the touchpad driver events; it returns 0 on success
+ * or a negative error code.
+ */
+static int synaptics_rmi4_disable(struct synaptics_rmi4_data *pdata)
+{
+ int retval;
+ unsigned char intr_status;
+
+ pdata->touch_stopped = true;
+ disable_irq(pdata->board->irq_number);
+
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ &intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ pdata->fn01_ctrl_base_addr + 1,
+ (intr_status & ~TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+ if (pdata->board->regulator_en)
+ regulator_disable(pdata->regulator);
+
+ return 0;
+}
+
+/**
+ * synaptics_rmi4_show_attr_enable() - show the touchpad enable value
+ * @dev: pointer to device data structure
+ * @attr: pointer to attribute structure
+ * @buf: pointer to character buffer
+ *
+ * This function shows the touchpad enable value; it returns the number of
+ * bytes written to @buf.
+ */
+static ssize_t synaptics_rmi4_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", pdata->enable);
+}
+
+/**
+ * synaptics_rmi4_store_attr_enable() - store the touchpad enable value
+ * @dev: pointer to device data structure
+ * @attr: pointer to attribute structure
+ * @buf: pointer to character buffer
+ * @count: number of bytes in the buffer
+ *
+ * This function stores the touchpad enable value; it returns the number of
+ * bytes consumed or a negative error code.
+ */
+static ssize_t synaptics_rmi4_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev);
+ unsigned long val;
+ int retval = 0;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->enable != val) {
+ pdata->enable = val ? true : false;
+ if (pdata->enable)
+ retval = synaptics_rmi4_enable(pdata);
+ else
+ retval = synaptics_rmi4_disable(pdata);
+
+ }
+ return ((retval < 0) ? retval : count);
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ synaptics_rmi4_show_attr_enable, synaptics_rmi4_store_attr_enable);
+
+static struct attribute *synaptics_rmi4_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group synaptics_rmi4_attr_group = {
+ .attrs = synaptics_rmi4_attrs,
+};
+
+/**
* synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn structure
@@ -316,8 +451,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
unsigned char data[DATA_LEN];
int x[RMI4_NUMBER_OF_MAX_FINGERS];
int y[RMI4_NUMBER_OF_MAX_FINGERS];
- int wx[RMI4_NUMBER_OF_MAX_FINGERS];
- int wy[RMI4_NUMBER_OF_MAX_FINGERS];
+ int w[RMI4_NUMBER_OF_MAX_FINGERS];
+ static int prv_x[RMI4_NUMBER_OF_MAX_FINGERS];
+ static int prv_y[RMI4_NUMBER_OF_MAX_FINGERS];
struct i2c_client *client = pdata->i2c_client;
/* get 2D sensor finger data */
@@ -376,11 +512,7 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
y[touch_count] =
(data[1] << 4) |
((data[2] >> 4) & MASK_4BIT);
- wy[touch_count] =
- (data[3] >> 4) & MASK_4BIT;
- wx[touch_count] =
- (data[3] & MASK_4BIT);
-
+ w[touch_count] = data[3];
if (pdata->board->x_flip)
x[touch_count] =
pdata->sensor_max_x -
@@ -389,6 +521,25 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
y[touch_count] =
pdata->sensor_max_y -
y[touch_count];
+ if (x[touch_count] < 0)
+ x[touch_count] = 0;
+ else if (x[touch_count] >= pdata->sensor_max_x)
+ x[touch_count] =
+ pdata->sensor_max_x - 1;
+
+ if (y[touch_count] < 0)
+ y[touch_count] = 0;
+ else if (y[touch_count] >= pdata->sensor_max_y)
+ y[touch_count] =
+ pdata->sensor_max_y - 1;
+ }
+ if ((abs(x[finger] - prv_x[finger]) < DELTA) &&
+ (abs(y[finger] - prv_y[finger]) < DELTA)) {
+ x[finger] = prv_x[finger];
+ y[finger] = prv_y[finger];
+ } else {
+ prv_x[finger] = x[finger];
+ prv_y[finger] = y[finger];
}
/* number of active touch points */
touch_count++;
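
A standalone sketch of the DELTA-based jitter filter introduced in this hunk: reported coordinates snap back to the previous value while the finger moves less than DELTA (8) units on both axes. Sample points are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define DELTA 8

static void filter(int *x, int *y, int *prv_x, int *prv_y)
{
	if (abs(*x - *prv_x) < DELTA && abs(*y - *prv_y) < DELTA) {
		*x = *prv_x;		/* suppress small jitter */
		*y = *prv_y;
	} else {
		*prv_x = *x;		/* accept and remember real movement */
		*prv_y = *y;
	}
}

int main(void)
{
	int prv_x = 100, prv_y = 100;
	int samples[][2] = { { 103, 102 }, { 95, 99 }, { 140, 100 } };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		int x = samples[i][0], y = samples[i][1];

		filter(&x, &y, &prv_x, &prv_y);
		printf("reported (%d, %d)\n", x, y);	/* first two stay at (100, 100) */
	}
	return 0;
}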
@@ -399,7 +550,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
if (touch_count) {
for (finger = 0; finger < touch_count; finger++) {
input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
- max(wx[finger] , wy[finger]));
+ max(x[finger], y[finger]));
+ input_report_abs(pdata->input_dev, ABS_MT_WIDTH_MAJOR,
+ w[finger]);
input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
x[finger]);
input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
@@ -502,7 +655,7 @@ static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
touch_count = synaptics_rmi4_sensor_report(pdata);
if (touch_count)
wait_event_timeout(pdata->wait, pdata->touch_stopped,
- msecs_to_jiffies(1));
+ msecs_to_jiffies(TIMEOUT_PERIOD));
else
break;
} while (!pdata->touch_stopped);
@@ -881,9 +1034,27 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
}
/**
+ * synaptics_rmi4_resume_handler() - work queue for resume handler
+ * @work: work_struct structure pointer
+ *
+ * This work handler is used to resume the device.
+ */
+static void synaptics_rmi4_resume_handler(struct work_struct *work)
+{
+ struct synaptics_rmi4_data *prmi4_data = container_of(work,
+ struct synaptics_rmi4_data, resume_wq_handler);
+ struct i2c_client *client = prmi4_data->i2c_client;
+ int retval;
+
+ retval = synaptics_rmi4_enable(prmi4_data);
+ if (retval < 0)
+ dev_err(&client->dev, "%s: resume failed\n", __func__);
+}
+
+/**
* synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
- * @i2c: i2c client structure pointer
- * @id:i2c device id pointer
+ * @client: i2c client structure pointer
+ * @dev_id: i2c device id pointer
*
* This function will allocate and initialize the instance
* data and request the irq and set the instance data as the clients
@@ -927,19 +1098,17 @@ static int __devinit synaptics_rmi4_probe
goto err_input;
}
- rmi4_data->regulator = regulator_get(&client->dev, "vdd");
- if (IS_ERR(rmi4_data->regulator)) {
- dev_err(&client->dev, "%s:get regulator failed\n",
- __func__);
- retval = PTR_ERR(rmi4_data->regulator);
- goto err_get_regulator;
- }
- retval = regulator_enable(rmi4_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "%s:regulator enable failed\n",
- __func__);
- goto err_regulator_enable;
+ if (platformdata->regulator_en) {
+ rmi4_data->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(rmi4_data->regulator)) {
+ dev_err(&client->dev, "%s:get regulator failed\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->regulator);
+ goto err_regulator;
+ }
+ regulator_enable(rmi4_data->regulator);
}
+
init_waitqueue_head(&rmi4_data->wait);
/*
* Copy i2c_client pointer into RTID's i2c_client pointer for
@@ -987,7 +1156,16 @@ static int __devinit synaptics_rmi4_probe
input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
rmi4_data->sensor_max_y, 0, 0);
input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
- MAX_TOUCH_MAJOR, 0, 0);
+ max(rmi4_data->sensor_max_x, rmi4_data->sensor_max_y),
+ 0, 0);
+ input_set_abs_params(rmi4_data->input_dev, ABS_MT_WIDTH_MAJOR, 0,
+ MAX_WIDTH_MAJOR, 0, 0);
+
+ retval = input_register_device(rmi4_data->input_dev);
+ if (retval) {
+ dev_err(&client->dev, "%s:input register failed\n", __func__);
+ goto err_input_register;
+ }
/* Clear interrupts */
synaptics_rmi4_i2c_block_read(rmi4_data,
@@ -1000,24 +1178,34 @@ static int __devinit synaptics_rmi4_probe
if (retval) {
dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
__func__, platformdata->irq_number);
- goto err_query_dev;
+ goto err_request_irq;
}
- retval = input_register_device(rmi4_data->input_dev);
+ INIT_WORK(&rmi4_data->resume_wq_handler, synaptics_rmi4_resume_handler);
+
+ /* sysfs implementation for dynamic enable/disable the input event */
+ retval = sysfs_create_group(&client->dev.kobj,
+ &synaptics_rmi4_attr_group);
if (retval) {
- dev_err(&client->dev, "%s:input register failed\n", __func__);
- goto err_free_irq;
+ dev_err(&client->dev, "failed to create sysfs entries\n");
+ goto err_sysfs;
}
-
+ rmi4_data->enable = true;
return retval;
-err_free_irq:
+err_sysfs:
+ cancel_work_sync(&rmi4_data->resume_wq_handler);
+err_request_irq:
free_irq(platformdata->irq_number, rmi4_data);
+ input_unregister_device(rmi4_data->input_dev);
+err_input_register:
+ i2c_set_clientdata(client, NULL);
err_query_dev:
- regulator_disable(rmi4_data->regulator);
-err_regulator_enable:
- regulator_put(rmi4_data->regulator);
-err_get_regulator:
+ if (platformdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+err_regulator:
input_free_device(rmi4_data->input_dev);
rmi4_data->input_dev = NULL;
err_input:
@@ -1037,12 +1225,16 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+ sysfs_remove_group(&client->dev.kobj, &synaptics_rmi4_attr_group);
rmi4_data->touch_stopped = true;
wake_up(&rmi4_data->wait);
+ cancel_work_sync(&rmi4_data->resume_wq_handler);
free_irq(pdata->irq_number, rmi4_data);
input_unregister_device(rmi4_data->input_dev);
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
+ if (pdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
kfree(rmi4_data);
return 0;
@@ -1059,31 +1251,11 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
static int synaptics_rmi4_suspend(struct device *dev)
{
/* Touch sleep mode */
- int retval;
- unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
- rmi4_data->touch_stopped = true;
- disable_irq(pdata->irq_number);
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status & ~TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
-
- regulator_disable(rmi4_data->regulator);
-
- return 0;
+ return synaptics_rmi4_disable(rmi4_data);
}
+
/**
* synaptics_rmi4_resume() - resume the touch screen controller
* @dev: pointer to device structure
@@ -1093,28 +1265,9 @@ static int synaptics_rmi4_suspend(struct device *dev)
*/
static int synaptics_rmi4_resume(struct device *dev)
{
- int retval;
- unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
-
- regulator_enable(rmi4_data->regulator);
- enable_irq(pdata->irq_number);
- rmi4_data->touch_stopped = false;
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status | TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
+ schedule_work(&rmi4_data->resume_wq_handler);
return 0;
}
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
index 384436ef806..973abc97374 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -42,6 +42,7 @@ struct synaptics_rmi4_platform_data {
int irq_type;
bool x_flip;
bool y_flip;
+ bool regulator_en;
};
#endif
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 00000000000..a452e888d77
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,13 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Martin Hovang (martin.xm.hovang@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+# Trusted Execution Environment Configuration
+config TEE_SUPPORT
+ bool "Trusted Execution Environment Support"
+ default y
+ ---help---
+ This implements the Trusted Execution Environment (TEE) Client
+ API Specification from GlobalPlatform Device Technology.
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 00000000000..b937eb19d72
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Martin Hovang (martin.xm.hovang@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_TEE_SUPPORT) += tee_service.o
+obj-$(CONFIG_TEE_SUPPORT) += tee_driver.o
diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c
new file mode 100644
index 00000000000..5ae0cc9508b
--- /dev/null
+++ b/drivers/tee/tee_driver.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Hovang <martin.xm.hovang@stericsson.com>
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/tee.h>
+#include <linux/slab.h>
+
+#define TEED_NAME "tee"
+
+#define TEED_STATE_OPEN_DEV 0
+#define TEED_STATE_OPEN_SESSION 1
+
+static struct mutex sync;
+
+static int tee_open(struct inode *inode, struct file *file);
+static int tee_release(struct inode *inode, struct file *file);
+static ssize_t tee_read(struct file *filp, char __user *buffer,
+			size_t length, loff_t *offset);
+static ssize_t tee_write(struct file *filp, const char __user *buffer,
+			 size_t length, loff_t *offset);
+
+static inline void set_emsg(struct tee_session *ts, u32 msg)
+{
+ ts->err = msg;
+ ts->origin = TEED_ORIGIN_DRIVER;
+}
+
+static void reset_session(struct tee_session *ts)
+{
+ ts->state = TEED_STATE_OPEN_DEV;
+ ts->err = TEED_SUCCESS;
+ ts->origin = TEED_ORIGIN_DRIVER;
+ ts->id = 0;
+ ts->ta = NULL;
+ ts->uuid = NULL;
+ ts->cmd = 0;
+ ts->driver_cmd = TEED_OPEN_SESSION;
+ ts->ta_size = 0;
+ ts->op = NULL;
+}
+
+static int copy_ta(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ ts->ta = kmalloc(ku_buffer->ta_size, GFP_KERNEL);
+ if (ts->ta == NULL) {
+ pr_err("[%s] error, out of memory (ta)\n",
+ __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY);
+ return -ENOMEM;
+ }
+
+ ts->ta_size = ku_buffer->ta_size;
+
+ memcpy(ts->ta, ku_buffer->ta, ku_buffer->ta_size);
+ return 0;
+}
+
+static int copy_uuid(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ ts->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
+
+ if (ts->uuid == NULL) {
+ pr_err("[%s] error, out of memory (uuid)\n",
+ __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY);
+ return -ENOMEM;
+ }
+
+ memcpy(ts->uuid, ku_buffer->uuid, sizeof(struct tee_uuid));
+
+ return 0;
+}
+
+static inline void free_operation(struct tee_session *ts)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ kfree(ts->op->shm[i].buffer);
+ ts->op->shm[i].buffer = NULL;
+ }
+
+ kfree(ts->op);
+ ts->op = NULL;
+}
+
+static inline void memrefs_phys_to_virt(struct tee_session *ts)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if (ts->op->flags & (1 << i)) {
+ ts->op->shm[i].buffer =
+ phys_to_virt((unsigned long)
+ ts->op->shm[i].buffer);
+ }
+ }
+}
+
+static int copy_memref_to_user(struct tee_operation *op,
+ struct tee_operation *ubuf_op,
+ int memref)
+{
+ unsigned long bytes_left;
+
+ bytes_left = copy_to_user(ubuf_op->shm[memref].buffer,
+ op->shm[memref].buffer,
+ op->shm[memref].size);
+
+ if (bytes_left != 0) {
+ pr_err("[%s] Failed to copy result to user space (%lu "
+ "bytes left of buffer).\n", __func__, bytes_left);
+		return -EINVAL;
+ }
+
+ bytes_left = put_user(op->shm[memref].size, &ubuf_op->shm[memref].size);
+
+ if (bytes_left != 0) {
+ pr_err("[%s] Failed to copy result to user space (%lu "
+ "bytes left of size).\n", __func__, bytes_left);
+ return -EINVAL;
+ }
+
+ bytes_left = put_user(op->shm[memref].flags,
+ &ubuf_op->shm[memref].flags);
+ if (bytes_left != 0) {
+ pr_err("[%s] Failed to copy result to user space (%lu "
+ "bytes left of flags).\n", __func__, bytes_left);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int copy_memref_to_kernel(struct tee_operation *op,
+ struct tee_operation *kbuf_op,
+ int memref)
+{
+ /* Buffer freed in invoke_command if this function fails */
+ op->shm[memref].buffer = kmalloc(kbuf_op->shm[memref].size, GFP_KERNEL);
+
+ if (!op->shm[memref].buffer) {
+ pr_err("[%s] out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ /*
+ * Copy shared memory operations to a local kernel
+ * buffer if they are of type input.
+ */
+ if (kbuf_op->shm[memref].flags & TEEC_MEM_INPUT) {
+ memcpy(op->shm[memref].buffer,
+ kbuf_op->shm[memref].buffer,
+ kbuf_op->shm[memref].size);
+ }
+
+ op->shm[memref].size = kbuf_op->shm[memref].size;
+ op->shm[memref].flags = kbuf_op->shm[memref].flags;
+
+ /* Secure world expects physical addresses. */
+ op->shm[memref].buffer = (void *)virt_to_phys(op->shm[memref].buffer);
+
+ return 0;
+}
+
+static int open_tee_device(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ int ret;
+
+ if (ku_buffer->driver_cmd != TEED_OPEN_SESSION) {
+ set_emsg(ts, TEED_ERROR_BAD_STATE);
+ return -EINVAL;
+ }
+
+ if (ku_buffer->ta) {
+ ret = copy_ta(ts, ku_buffer);
+ } else if (ku_buffer->uuid) {
+ ret = copy_uuid(ts, ku_buffer);
+ } else {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION);
+ return -EINVAL;
+ }
+
+ ts->id = 0;
+ ts->state = TEED_STATE_OPEN_SESSION;
+ return ret;
+}
+
+static int invoke_command(struct tee_session *ts,
+ struct tee_session *ku_buffer,
+ struct tee_session __user *u_buffer)
+{
+ int i;
+ int ret = 0;
+ struct tee_operation *kbuf_op =
+ (struct tee_operation *)ku_buffer->op;
+
+ ts->op = kmalloc(sizeof(struct tee_operation), GFP_KERNEL);
+
+	if (ts->op == NULL) {
+		pr_err("[%s] error, out of memory (op)\n", __func__);
+		set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY);
+		/* Return directly: free_operation() would dereference ts->op. */
+		return -ENOMEM;
+	}
+
+ /* Copy memrefs to kernel space. */
+ ts->op->flags = kbuf_op->flags;
+ ts->cmd = ku_buffer->cmd;
+
+ for (i = 0; i < 4; ++i) {
+ /* We only want to copy memrefs in use. */
+ if (kbuf_op->flags & (1 << i)) {
+ ret = copy_memref_to_kernel(ts->op, kbuf_op, i);
+
+ if (ret)
+ goto err;
+ } else {
+ ts->op->shm[i].buffer = NULL;
+ ts->op->shm[i].size = 0;
+ ts->op->shm[i].flags = 0;
+ }
+ }
+
+ /* To call secure world */
+	/* Call secure world. */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Convert physical addresses back to virtual address so the
+ * kernel can free the buffers when closing the session.
+ */
+ memrefs_phys_to_virt(ts);
+
+ for (i = 0; i < 4; ++i) {
+ if ((kbuf_op->flags & (1 << i)) &&
+ (kbuf_op->shm[i].flags & TEEC_MEM_OUTPUT)) {
+ struct tee_operation *ubuf_op =
+ (struct tee_operation *)u_buffer->op;
+
+ ret = copy_memref_to_user(ts->op, ubuf_op, i);
+ }
+ }
+err:
+ free_operation(ts);
+
+ return ret;
+}
+
+static int tee_open(struct inode *inode, struct file *filp)
+{
+ struct tee_session *ts;
+
+ filp->private_data = kmalloc(sizeof(struct tee_session),
+ GFP_KERNEL);
+
+ if (filp->private_data == NULL)
+ return -ENOMEM;
+
+ ts = (struct tee_session *) (filp->private_data);
+
+ reset_session(ts);
+
+ return 0;
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ struct tee_session *ts;
+ int i;
+
+ ts = (struct tee_session *) (filp->private_data);
+
+ if (ts == NULL)
+ goto no_ts;
+
+ if (ts->op) {
+ for (i = 0; i < 4; ++i) {
+ kfree(ts->op->shm[i].buffer);
+ ts->op->shm[i].buffer = NULL;
+ }
+ }
+
+ kfree(ts->op);
+ ts->op = NULL;
+
+ kfree(ts->ta);
+ ts->ta = NULL;
+
+no_ts:
+ kfree(filp->private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Called when a process, which already opened the dev file, attempts
+ * to read from it. This function gets the current status of the session.
+ */
+static ssize_t tee_read(struct file *filp, char __user *buffer,
+		    size_t length, loff_t *offset)
+{
+ struct tee_read buf;
+ struct tee_session *ts;
+
+ if (length != sizeof(struct tee_read)) {
+ pr_err("[%s] error, incorrect input length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ts = (struct tee_session *) (filp->private_data);
+
+ if (ts == NULL) {
+ pr_err("[%s] error, private_data not "
+ "initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sync);
+
+ buf.err = ts->err;
+ buf.origin = ts->origin;
+
+ mutex_unlock(&sync);
+
+ if (copy_to_user(buffer, &buf, length)) {
+ pr_err("[%s] error, copy_to_user failed!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return length;
+}
+
+/*
+ * Called when a process writes to a dev file
+ */
+static ssize_t tee_write(struct file *filp, const char __user *buffer,
+		     size_t length, loff_t *offset)
+{
+ struct tee_session ku_buffer;
+ struct tee_session *ts;
+ int ret = length;
+
+ if (length != sizeof(struct tee_session)) {
+ pr_err("[%s] error, incorrect input length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ku_buffer, buffer, length)) {
+ pr_err("[%s] error, tee_session "
+ "copy_from_user failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ts = (struct tee_session *) (filp->private_data);
+
+ if (ts == NULL) {
+ pr_err("[%s] error, private_data not "
+ "initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sync);
+
+ switch (ts->state) {
+ case TEED_STATE_OPEN_DEV:
+ ret = open_tee_device(ts, &ku_buffer);
+ break;
+
+ case TEED_STATE_OPEN_SESSION:
+ switch (ku_buffer.driver_cmd) {
+ case TEED_INVOKE:
+ ret = invoke_command(ts, &ku_buffer,
+					(struct tee_session __user *)buffer);
+ break;
+
+ case TEED_CLOSE_SESSION:
+ /* no caching implemented yet... */
+ if (call_sec_world(ts, TEED_CLOSE_SESSION))
+ ret = -EINVAL;
+
+ kfree(ts->ta);
+ ts->ta = NULL;
+
+ reset_session(ts);
+ break;
+
+ default:
+ set_emsg(ts, TEED_ERROR_BAD_PARAMETERS);
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ pr_err("[%s] unknown state\n", __func__);
+ set_emsg(ts, TEED_ERROR_BAD_STATE);
+ ret = -EINVAL;
+ }
+
+ /*
+	 * We expect ret to be zero when we reach this point; any other
+	 * value means an error occurred.
+ */
+ if (!ret)
+ ret = length;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&sync);
+
+ return ret;
+}
+
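For orientation, the read/write protocol implemented above corresponds to a user-space sequence roughly like the one below. This is a sketch only: the struct layouts come from <linux/tee.h> (not shown in this patch and assumed to be visible to user space), error handling is omitted, and the command id and UUID argument are illustrative assumptions.

	/* Hypothetical user-space client, not part of this patch. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/tee.h>		/* assumed exported header */

	static int tee_example(struct tee_uuid *ta_uuid, struct tee_operation *op)
	{
		struct tee_session ses = { 0 };
		struct tee_read status;
		int fd = open("/dev/tee", O_RDWR);

		ses.driver_cmd = TEED_OPEN_SESSION;
		ses.uuid = ta_uuid;			/* open a session to a resident TA */
		write(fd, &ses, sizeof(ses));		/* TEED_STATE_OPEN_DEV -> OPEN_SESSION */

		ses.driver_cmd = TEED_INVOKE;
		ses.cmd = 0x1;				/* assumed TA-specific command id */
		ses.op = op;				/* memrefs flagged in op->flags */
		write(fd, &ses, sizeof(ses));		/* handled by invoke_command() */

		read(fd, &status, sizeof(status));	/* fetches ts->err / ts->origin */
		close(fd);

		return status.err == TEED_SUCCESS ? 0 : -1;
	}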
+int teec_initialize_context(const char *name, struct tee_context *context)
+{
+ return TEED_SUCCESS;
+}
+EXPORT_SYMBOL(teec_initialize_context);
+
+int teec_finalize_context(struct tee_context *context)
+{
+ return TEED_SUCCESS;
+}
+EXPORT_SYMBOL(teec_finalize_context);
+
+int teec_open_session(struct tee_context *context,
+ struct tee_session *session,
+ const struct tee_uuid *destination,
+ unsigned int connection_method,
+ void *connection_data, struct tee_operation *operation,
+ unsigned int *error_origin)
+{
+ int res = TEED_SUCCESS;
+
+ if (session == NULL || destination == NULL) {
+ pr_err("[%s] session or destination == NULL\n", __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ reset_session(session);
+
+ /*
+ * Open a session towards an application already loaded inside
+ * the TEE
+ */
+ session->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
+
+ if (session->uuid == NULL) {
+ pr_err("[%s] error, out of memory (uuid)\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ memcpy(session->uuid, destination, sizeof(struct tee_uuid));
+
+ session->ta = NULL;
+ session->id = 0;
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(teec_open_session);
+
+int teec_close_session(struct tee_session *session)
+{
+ int res = TEED_SUCCESS;
+
+ mutex_lock(&sync);
+
+ if (session == NULL) {
+ pr_err("[%s] error, session == NULL\n", __func__);
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ if (call_sec_world(session, TEED_CLOSE_SESSION)) {
+ pr_err("[%s] error, call_sec_world failed\n", __func__);
+ res = TEED_ERROR_GENERIC;
+ goto exit;
+ }
+
+exit:
+ if (session != NULL) {
+ kfree(session->uuid);
+ session->uuid = NULL;
+ }
+
+ mutex_unlock(&sync);
+ return res;
+}
+EXPORT_SYMBOL(teec_close_session);
+
+int teec_invoke_command(
+ struct tee_session *session, unsigned int command_id,
+ struct tee_operation *operation,
+ unsigned int *error_origin)
+{
+ int res = TEED_SUCCESS;
+ int i;
+
+ mutex_lock(&sync);
+
+ if (session == NULL || operation == NULL || error_origin == NULL) {
+ pr_err("[%s] error, input parameters == NULL\n", __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ /* We only want to translate memrefs in use. */
+ if (operation->flags & (1 << i)) {
+ operation->shm[i].buffer =
+ (void *)virt_to_phys(
+ operation->shm[i].buffer);
+ }
+ }
+ session->op = operation;
+ session->cmd = command_id;
+
+ /*
+ * Call secure world
+ */
+ if (call_sec_world(session, TEED_INVOKE)) {
+ pr_err("[%s] error, call_sec_world failed\n", __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_GENERIC;
+ }
+ if (session->err != TEED_SUCCESS) {
+ pr_err("[%s] error, call_sec_world failed\n", __func__);
+ if (error_origin != NULL)
+ *error_origin = session->origin;
+ res = session->err;
+ }
+
+ memrefs_phys_to_virt(session);
+ session->op = NULL;
+
+exit:
+ mutex_unlock(&sync);
+ return res;
+}
+EXPORT_SYMBOL(teec_invoke_command);
+
+int teec_allocate_shared_memory(struct tee_context *context,
+ struct tee_sharedmemory *shared_memory)
+{
+ int res = TEED_SUCCESS;
+
+ if (shared_memory == NULL) {
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ shared_memory->buffer = kmalloc(shared_memory->size,
+ GFP_KERNEL);
+
+ if (shared_memory->buffer == NULL) {
+ res = TEED_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(teec_allocate_shared_memory);
+
+void teec_release_shared_memory(struct tee_sharedmemory *shared_memory)
+{
+ kfree(shared_memory->buffer);
+}
+EXPORT_SYMBOL(teec_release_shared_memory);
+
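The exported teec_* helpers give kernel code a small subset of the GlobalPlatform client API. A hedged sketch of an in-kernel caller follows; only the function signatures and the TEED_*/TEEC_* names come from the code above, while the UUID, buffer and command id are illustrative assumptions.

	/* Hypothetical in-kernel user, not part of this patch. */
	static int example_invoke(struct tee_uuid *ta_uuid, void *buf, size_t len)
	{
		struct tee_context ctx;
		struct tee_session ses;
		struct tee_operation op = { };
		unsigned int origin;
		int ret;

		ret = teec_initialize_context(NULL, &ctx);
		if (ret != TEED_SUCCESS)
			return -EIO;

		ret = teec_open_session(&ctx, &ses, ta_uuid, 0, NULL, NULL, &origin);
		if (ret != TEED_SUCCESS)
			goto out_ctx;

		op.flags = 1 << 0;			/* memref 0 in use */
		op.shm[0].buffer = buf;			/* must be kmalloc'd: virt_to_phys() */
		op.shm[0].size = len;
		op.shm[0].flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT;

		ret = teec_invoke_command(&ses, 0x1 /* assumed cmd id */, &op, &origin);

		teec_close_session(&ses);
	out_ctx:
		teec_finalize_context(&ctx);
		return ret == TEED_SUCCESS ? 0 : -EIO;
	}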
+static const struct file_operations tee_fops = {
+ .owner = THIS_MODULE,
+ .read = tee_read,
+ .write = tee_write,
+ .open = tee_open,
+ .release = tee_release,
+};
+
+static struct miscdevice tee_dev = {
+ MISC_DYNAMIC_MINOR,
+ TEED_NAME,
+ &tee_fops
+};
+
+static int __init tee_init(void)
+{
+ int err = 0;
+
+ err = misc_register(&tee_dev);
+
+ if (err) {
+ pr_err("[%s] error %d adding character device "
+ "TEE\n", __func__, err);
+ }
+
+ mutex_init(&sync);
+
+ return err;
+}
+
+static void __exit tee_exit(void)
+{
+ misc_deregister(&tee_dev);
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Trusted Execution Environment driver");
diff --git a/drivers/tee/tee_service.c b/drivers/tee/tee_service.c
new file mode 100644
index 00000000000..b01e9d0ac39
--- /dev/null
+++ b/drivers/tee/tee_service.c
@@ -0,0 +1,17 @@
+/*
+ * TEE service to handle the calls to trusted applications.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/tee.h>
+#include <linux/device.h>
+
+int __weak call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ pr_info("[%s] Generic call_sec_world called!\n", __func__);
+
+ return 0;
+}
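call_sec_world() is intentionally a weak stub so that platform code can supply the real secure-world entry point. The shape of such an override might look like the sketch below; platform_smc() is a placeholder for whatever SoC-specific mechanism performs the call, and nothing in this sketch is defined by the patch itself.

	/* Hypothetical platform-specific override, not part of this patch. */
	int call_sec_world(struct tee_session *ts, int sec_cmd)
	{
		switch (sec_cmd) {
		case TEED_OPEN_SESSION:
		case TEED_INVOKE:
		case TEED_CLOSE_SESSION:
			/*
			 * Pass ts->uuid or ts->ta, ts->cmd and the physical memref
			 * addresses in ts->op to the secure monitor, then fill in
			 * ts->err and ts->origin from the returned status.
			 */
			return platform_smc(ts, sec_cmd);	/* placeholder */
		default:
			ts->err = TEED_ERROR_BAD_PARAMETERS;
			ts->origin = TEED_ORIGIN_DRIVER;
			return -EINVAL;
		}
	}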
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 925a1e547a8..6fb92f30e33 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -330,6 +330,14 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_AMBA_PL011_CLOCK_CONTROL
+ bool "Support for clock control on AMBA serial port"
+ depends on SERIAL_AMBA_PL011
+ select CONSOLE_POLL
+ ---help---
+	  Say Y here if you wish to use the amba set_termios function to
+	  control the pl011 clock. Any positive baud rate enables the clock;
+	  a baud rate of zero (B0 hangup) requests that the clock be turned
+	  off once pending TX data has been flushed.
+
config SERIAL_SB1250_DUART
tristate "BCM1xxx on-chip DUART serial support"
depends on SIBYTE_SB1xxx_SOC=y
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 00233af1acc..23b1bbe581a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -47,12 +47,13 @@
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/sizes.h>
#define UART_NR 14
@@ -63,9 +64,41 @@
#define AMBA_ISR_PASS_LIMIT 256
-#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
+#define UART_DR_ERROR (UART011_DR_OE | UART011_DR_BE | \
+ UART011_DR_PE | UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+/*
+ * The console UART is handled differently for power management (it doesn't
+ * take the regulator, in order to allow the system to go to sleep even if the
+ * console is open). This should be removed once cable detect is in place.
+ */
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) ((port)->cons \
+ && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+/* Available amba pl011 port clock states */
+enum pl011_clk_states {
+ PL011_CLK_OFF = 0, /* clock disabled */
+ PL011_CLK_REQUEST_OFF, /* disable after TX flushed */
+ PL011_CLK_ON, /* clock enabled */
+ PL011_PORT_OFF, /* port disabled */
+};
+
+/*
+ * Backup registers to be used during regulator startup/shutdown
+ */
+static const u32 backup_regs[] = {
+ UART011_IBRD,
+ UART011_FBRD,
+ ST_UART011_LCRH_RX,
+ ST_UART011_LCRH_TX,
+ UART011_CR,
+ UART011_IMSC,
+};
#define UART_WA_SAVE_NR 14
@@ -157,9 +190,17 @@ struct uart_amba_port {
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
+ unsigned int ifls; /* vendor-specific */
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
bool autorts;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ enum pl011_clk_states clk_state; /* actual clock state */
+ struct delayed_work clk_off_work; /* work used for clock off */
+ unsigned int clk_off_delay; /* clock off delay */
+#endif
+ struct regulator *regulator;
+ u32 backup[ARRAY_SIZE(backup_regs)];
char type[12];
bool interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
@@ -305,7 +346,8 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
.src_maxburst = uap->fifosize >> 1,
};
- chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+ chan = dma_request_channel(mask,
+ plat->dma_filter, plat->dma_rx_param);
if (!chan) {
dev_err(uap->port.dev, "no RX DMA channel!\n");
return;
@@ -749,8 +791,9 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
*/
if (dma_count == pending && readfifo) {
/* Clear any error flags */
- writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
- uap->port.membase + UART011_ICR);
+ writew(UART011_OEIS | UART011_BEIS |
+ UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
/*
* If we read all the DMA'd characters, and we had an
@@ -947,11 +990,13 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
spin_unlock_irq(&uap->port.lock);
if (uap->using_tx_dma) {
- /* In theory, this should already be done by pl011_dma_flush_buffer */
+ /* In theory, this should already be done by
+ * pl011_dma_flush_buffer
+ */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_sg(uap->dmatx.chan->device->dev,
+ &uap->dmatx.sg, 1, DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
@@ -962,8 +1007,10 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ DMA_FROM_DEVICE);
uap->using_rx_dma = false;
}
}
@@ -1100,6 +1147,246 @@ static void pl011_lockup_wa(unsigned long data)
tty->hw_stopped = 0;
}
+static void __pl011_startup(struct uart_amba_port *uap)
+{
+ unsigned int cr;
+
+ writew(uap->ifls, uap->port.membase + UART011_IFLS);
+
+ /*
+ * Provoke TX FIFO interrupt into asserting.
+ */
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+ writew(cr, uap->port.membase + UART011_CR);
+ writew(0, uap->port.membase + UART011_FBRD);
+ writew(1, uap->port.membase + UART011_IBRD);
+ writew(0, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_tx != uap->lcrh_rx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing LCRH_TX register,
+		 * to get this delay, write to a read-only register 10 times
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(0, uap->port.membase + uap->lcrh_tx);
+ }
+ writew(0, uap->port.membase + UART01x_DR);
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+ barrier();
+}
+
+/* Backup the registers during regulator startup/shutdown */
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+static int pl011_backup(struct uart_amba_port *uap, bool suspend)
+{
+ int i, cnt;
+
+ if (!suspend) {
+ __pl011_startup(uap);
+ writew(0, uap->port.membase + UART011_CR);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(backup_regs); i++) {
+ if (suspend)
+ uap->backup[i] = readw(uap->port.membase +
+ backup_regs[i]);
+ else {
+ if (backup_regs[i] == ST_UART011_LCRH_TX) {
+ /*
+ * Wait 10 PCLKs before writing LCRH_TX
+				 * register; to get this delay, write to a
+				 * read-only register 10 times
+ */
+ for (cnt = 0; cnt < 10; ++cnt)
+ writew(0xff, uap->port.membase +
+ UART011_MIS);
+ }
+
+ writew(uap->backup[i],
+ uap->port.membase + backup_regs[i]);
+ }
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+/* Turn clock off if TX buffer is empty, otherwise reschedule */
+static void pl011_clock_off(struct work_struct *work)
+{
+ struct uart_amba_port *uap = container_of(work, struct uart_amba_port,
+ clk_off_work.work);
+ struct uart_port *port = &uap->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+ bool disable_regulator = false;
+ unsigned int busy, interrupt_status;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ interrupt_status = readw(uap->port.membase + UART011_MIS);
+ busy = readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY;
+
+ if (uap->clk_state == PL011_CLK_REQUEST_OFF) {
+ if (uart_circ_empty(xmit) && !interrupt_status && !busy) {
+ if (!uart_console(&uap->port) && uap->regulator) {
+ pl011_backup(uap, true);
+ disable_regulator = true;
+ }
+ uap->clk_state = PL011_CLK_OFF;
+ clk_disable(uap->clk);
+ } else
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (disable_regulator)
+ regulator_disable(uap->regulator);
+}
+
+/* Request to turn off uart clock once pending TX is flushed */
+static void pl011_clock_request_off(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_amba_port *uap = (struct uart_amba_port *)(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (uap->clk_state == PL011_CLK_ON) {
+ uap->clk_state = PL011_CLK_REQUEST_OFF;
+ /* Turn off later */
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Request to immediately turn on uart clock */
+static void pl011_clock_on(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_amba_port *uap = (struct uart_amba_port *)(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ switch (uap->clk_state) {
+ case PL011_CLK_OFF:
+ clk_enable(uap->clk);
+ if (!uart_console(&uap->port) && uap->regulator) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ regulator_enable(uap->regulator);
+ spin_lock_irqsave(&port->lock, flags);
+ pl011_backup(uap, false);
+ }
+ /* fallthrough */
+ case PL011_CLK_REQUEST_OFF:
+ cancel_delayed_work(&uap->clk_off_work);
+ uap->clk_state = PL011_CLK_ON;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void pl011_clock_check(struct uart_amba_port *uap)
+{
+	/* Reschedule work during off request */
+ if (uap->clk_state == PL011_CLK_REQUEST_OFF)
+ /* New TX - restart work */
+ if (cancel_delayed_work(&uap->clk_off_work))
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+}
+
+static int pl011_power_startup(struct uart_amba_port *uap)
+{
+ int retval = 0;
+
+ if (uap->clk_state == PL011_PORT_OFF) {
+ if (!uart_console(&uap->port) && uap->regulator)
+ regulator_enable(uap->regulator);
+ retval = clk_enable(uap->clk);
+ if (!retval)
+ uap->clk_state = PL011_CLK_ON;
+ else
+ uap->clk_state = PL011_PORT_OFF;
+ }
+
+ return retval;
+}
+
+static void pl011_power_shutdown(struct uart_amba_port *uap)
+{
+ bool disable_regulator = false;
+
+ cancel_delayed_work_sync(&uap->clk_off_work);
+
+ spin_lock_irq(&uap->port.lock);
+ if (uap->clk_state == PL011_CLK_ON ||
+ uap->clk_state == PL011_CLK_REQUEST_OFF) {
+ clk_disable(uap->clk);
+ if (!uart_console(&uap->port) && uap->regulator)
+ disable_regulator = true;
+ }
+ uap->clk_state = PL011_PORT_OFF;
+ spin_unlock_irq(&uap->port.lock);
+
+ if (disable_regulator)
+ regulator_disable(uap->regulator);
+}
+
+static void
+pl011_clock_control(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ speed_t new_baud = tty_termios_baud_rate(termios);
+
+ if (new_baud == 0)
+ pl011_clock_request_off(port);
+ else
+ pl011_clock_on(port);
+}
+
+static void pl011_clock_control_init(struct uart_amba_port *uap)
+{
+ uap->clk_state = PL011_PORT_OFF;
+ INIT_DELAYED_WORK(&uap->clk_off_work, pl011_clock_off);
+ uap->clk_off_delay = HZ / 10; /* 100 ms */
+}
+
+#else
+/* Blank functions for clock control */
+static inline void pl011_clock_check(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_power_startup(struct uart_amba_port *uap)
+{
+ return clk_enable(uap->clk);
+}
+
+static inline void pl011_power_shutdown(struct uart_amba_port *uap)
+{
+ clk_disable(uap->clk);
+}
+
+static inline void
+pl011_clock_control(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+}
+
+static inline void pl011_clock_control_init(struct uart_amba_port *uap)
+{
+}
+#endif
+
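With CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL enabled, the clock state is driven entirely from set_termios (see pl011_clock_control() above): any non-zero baud rate switches the clock on, and B0 asks for it to be gated once pending TX has drained. From user space that maps onto ordinary termios calls, roughly as sketched here; the device node name is an assumption.

	/* Hypothetical user-space usage, not part of this patch. */
	#include <fcntl.h>
	#include <termios.h>
	#include <unistd.h>

	static void uart_clock_demo(void)
	{
		struct termios tio;
		int fd = open("/dev/ttyAMA1", O_RDWR | O_NOCTTY);	/* assumed port */

		tcgetattr(fd, &tio);
		cfsetospeed(&tio, B115200);	/* non-zero rate -> PL011_CLK_ON */
		cfsetispeed(&tio, B115200);
		tcsetattr(fd, TCSANOW, &tio);

		/* ... transfer data ... */

		cfsetospeed(&tio, B0);		/* B0 -> clock off after TX is flushed */
		cfsetispeed(&tio, B0);
		tcsetattr(fd, TCSADRAIN, &tio);
		close(fd);
	}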
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1191,6 +1478,9 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
break;
} while (--count > 0);
+ if (count)
+ pl011_clock_check(uap);
+
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
@@ -1236,7 +1526,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
do {
writew(status & ~(UART011_TXIS|UART011_RTIS|
UART011_RXIS),
- uap->port.membase + UART011_ICR);
+ uap->port.membase + UART011_ICR);
if (status & (UART011_RTIS|UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
@@ -1281,7 +1571,7 @@ static unsigned int pl01x_get_mctrl(struct uart_port *port)
#define TIOCMBIT(uartbit, tiocmbit) \
if (status & uartbit) \
- result |= tiocmbit
+ (result |= tiocmbit)
TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
@@ -1299,10 +1589,12 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
cr = readw(uap->port.membase + UART011_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
- if (mctrl & tiocmbit) \
- cr |= uartbit; \
- else \
- cr &= ~uartbit
+ do {\
+ if (mctrl & tiocmbit) \
+ cr |= uartbit; \
+ else \
+ cr &= ~uartbit; \
+ } while (0)
TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
@@ -1372,9 +1664,9 @@ static int pl011_startup(struct uart_port *port)
goto out;
/*
- * Try to enable the clock producer.
+ * Try to enable the clock producer and the regulator.
*/
- retval = clk_enable(uap->clk);
+ retval = pl011_power_startup(uap);
if (retval)
goto clk_unprep;
@@ -1387,29 +1679,7 @@ static int pl011_startup(struct uart_port *port)
if (retval)
goto clk_dis;
- writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
-
- /*
- * Provoke TX FIFO interrupt into asserting.
- */
- cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
- writew(cr, uap->port.membase + UART011_CR);
- writew(0, uap->port.membase + UART011_FBRD);
- writew(1, uap->port.membase + UART011_IBRD);
- writew(0, uap->port.membase + uap->lcrh_rx);
- if (uap->lcrh_tx != uap->lcrh_rx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(0, uap->port.membase + uap->lcrh_tx);
- }
- writew(0, uap->port.membase + UART01x_DR);
- while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
- barrier();
+ __pl011_startup(uap);
cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
@@ -1421,7 +1691,8 @@ static int pl011_startup(struct uart_port *port)
/*
* initialise the old status of the modem signals
*/
- uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+ uap->old_status = readw(uap->port.membase + UART01x_FR) &
+ UART01x_FR_MODEM_ANY;
/* Startup DMA */
pl011_dma_startup(uap);
@@ -1449,7 +1720,7 @@ static int pl011_startup(struct uart_port *port)
return 0;
clk_dis:
- clk_disable(uap->clk);
+ pl011_power_shutdown(uap);
clk_unprep:
clk_unprepare(uap->clk);
out:
@@ -1459,11 +1730,11 @@ static int pl011_startup(struct uart_port *port)
static void pl011_shutdown_channel(struct uart_amba_port *uap,
unsigned int lcrh)
{
- unsigned long val;
+ unsigned long val;
- val = readw(uap->port.membase + lcrh);
- val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
- writew(val, uap->port.membase + lcrh);
+ val = readw(uap->port.membase + lcrh);
+ val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+ writew(val, uap->port.membase + lcrh);
}
static void pl011_shutdown(struct uart_port *port)
@@ -1490,7 +1761,8 @@ static void pl011_shutdown(struct uart_port *port)
* disable the port
*/
uap->autorts = false;
- writew(UART01x_CR_UARTEN | UART011_CR_TXE, uap->port.membase + UART011_CR);
+ writew(UART01x_CR_UARTEN | UART011_CR_TXE,
+ uap->port.membase + UART011_CR);
/*
* disable break condition and fifos
@@ -1499,10 +1771,18 @@ static void pl011_shutdown(struct uart_port *port)
if (uap->lcrh_rx != uap->lcrh_tx)
pl011_shutdown_channel(uap, uap->lcrh_tx);
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->exit)
+ plat->exit();
+ }
+
/*
- * Shut down the clock producer
+	 * Shut down the clock producer and the regulator
*/
- clk_disable(uap->clk);
+ pl011_power_shutdown(uap);
clk_unprepare(uap->clk);
if (uap->port.dev->platform_data) {
@@ -1515,6 +1795,32 @@ static void pl011_shutdown(struct uart_port *port)
}
+/* Power/Clock management. */
+static void pl011_serial_pm(struct uart_port *port, unsigned int state,
+			    unsigned int oldstate)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ switch (state) {
+	case 0: /* fully on */
+ /*
+ * Enable the peripheral clock for this serial port.
+ * This is called on uart_open() or a resume event.
+ */
+ pl011_power_startup(uap);
+ break;
+ case 3: /* powered down */
+ /*
+ * Disable the peripheral clock for this serial port.
+ * This is called on uart_close() or a suspend event.
+ */
+ pl011_power_shutdown(uap);
+ break;
+ default:
+ printk(KERN_ERR "pl011_serial: unknown pm %d\n", state);
+ }
+}
+
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -1528,7 +1834,12 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
clkdiv = 8;
else
clkdiv = 16;
-
+ /*
+	 * Must be called before uart_get_baud_rate(), because that
+	 * function replaces a zero (B0 hangup) baud rate with the
+	 * default one.
+ */
+ pl011_clock_control(port, termios, old);
/*
* Ask the core to calculate the divisor for us.
*/
@@ -1550,7 +1861,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
case CS7:
lcr_h = UART01x_LCRH_WLEN_7;
break;
- default: // CS8
+ default: /* CS8 */
lcr_h = UART01x_LCRH_WLEN_8;
break;
}
@@ -1716,14 +2027,13 @@ static struct uart_ops amba_pl011_pops = {
.request_port = pl010_request_port,
.config_port = pl010_config_port,
.verify_port = pl010_verify_port,
+ .pm = pl011_serial_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = pl010_get_poll_char,
.poll_put_char = pl010_put_poll_char,
#endif
};
-static struct uart_amba_port *amba_ports[UART_NR];
-
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
static void pl011_console_putchar(struct uart_port *port, int ch)
@@ -1867,7 +2177,6 @@ static struct uart_driver amba_reg = {
.nr = UART_NR,
.cons = AMBA_CONSOLE,
};
-
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -1896,6 +2205,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto free;
}
+ uap->regulator = regulator_get(&dev->dev, "v-uart");
+ if (IS_ERR(uap->regulator)) {
+ dev_warn(&dev->dev, "could not get uart regulator\n");
+ uap->regulator = NULL;
+ }
+
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
ret = PTR_ERR(uap->clk);
@@ -1903,6 +2218,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
}
uap->vendor = vendor;
+ uap->ifls = vendor->ifls;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->fifosize = vendor->fifosize;
@@ -1923,6 +2239,9 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
+
+ pl011_clock_control_init(uap);
+
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
amba_set_drvdata(dev, NULL);
@@ -1930,6 +2249,8 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
pl011_dma_remove(uap);
clk_put(uap->clk);
unmap:
+ if (uap->regulator)
+ regulator_put(uap->regulator);
iounmap(base);
free:
kfree(uap);
@@ -1953,6 +2274,8 @@ static int pl011_remove(struct amba_device *dev)
pl011_dma_remove(uap);
iounmap(uap->port.membase);
+ if (uap->regulator)
+ regulator_put(uap->regulator);
clk_put(uap->clk);
kfree(uap);
return 0;
@@ -1965,7 +2288,12 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state)
if (!uap)
return -EINVAL;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ cancel_delayed_work_sync(&uap->clk_off_work);
+ if (uap->clk_state == PL011_CLK_OFF)
+ return 0;
+#endif
return uart_suspend_port(&amba_reg, &uap->port);
}
@@ -1975,6 +2303,10 @@ static int pl011_resume(struct amba_device *dev)
if (!uap)
return -EINVAL;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ if (uap->clk_state == PL011_CLK_OFF)
+ return 0;
+#endif
return uart_resume_port(&amba_reg, &uap->port);
}
@@ -2031,7 +2363,7 @@ static void __exit pl011_exit(void)
* While this can be a module, if builtin it's most likely the console
* So let's leave module_exit but move module_init to an earlier place
*/
-arch_initcall(pl011_init);
+subsys_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 79781461eec..c5d5f8d8573 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -30,6 +30,12 @@
#include "usb.h"
+#ifdef CONFIG_ARCH_U8500
+#define MAX_TOPO_LEVEL_U8500 2
+#define MAX_USB_DEVICE_U8500 8
+int usb_device_count;
+#endif
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@ -1298,11 +1304,20 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hub_is_superspeed(hdev) || !hdev->parent)
usb_enable_autosuspend(hdev);
+#ifdef CONFIG_ARCH_U8500
+ if (hdev->level > MAX_TOPO_LEVEL_U8500) {
+ dev_err(&intf->dev,
+			"Unsupported bus topology: more than %d levels "
+			"of hub nesting\n", MAX_TOPO_LEVEL_U8500);
+ return -E2BIG;
+ }
+#else
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
return -E2BIG;
}
+#endif
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
@@ -1585,12 +1600,14 @@ static void choose_devnum(struct usb_device *udev)
* bus->devnum_next. */
devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
bus->devnum_next);
- if (devnum >= 128)
+	/* Due to hardware bugs we need to reserve a device address
+	 * for flushing of endpoints. */
+ if (devnum >= 127)
devnum = find_next_zero_bit(bus->devmap.devicemap,
128, 1);
- bus->devnum_next = ( devnum >= 127 ? 1 : devnum + 1);
+ bus->devnum_next = devnum >= 126 ? 1 : devnum + 1;
}
- if (devnum < 128) {
+ if (devnum < 127) {
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
@@ -3317,6 +3334,22 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto loop;
}
+#ifdef CONFIG_ARCH_U8500
+ if (hdev->parent == NULL)
+ usb_device_count = 1;
+
+ if (usb_device_count > MAX_USB_DEVICE_U8500) {
+		dev_err(&udev->dev,
+			"more than %d devices connected\n",
+			MAX_USB_DEVICE_U8500);
+
+ status = -ENOTCONN; /* Don't retry */
+ goto loop;
+ }
+#endif
+
/* reset (non-USB 3.0 devices) and get descriptor */
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2..a5fdc3ac0d7 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -46,11 +46,18 @@ EXPORT_SYMBOL_GPL(usb_unregister_notify);
void usb_notify_add_device(struct usb_device *udev)
{
+#ifdef CONFIG_ARCH_U8500
+ usb_device_count++;
+#endif
+
blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
}
void usb_notify_remove_device(struct usb_device *udev)
{
+#ifdef CONFIG_ARCH_U8500
+ usb_device_count--;
+#endif
/* Protect against simultaneous usbfs open */
mutex_lock(&usbfs_mutex);
blocking_notifier_call_chain(&usb_notifier_list,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 3888778582c..c9f8b00e06e 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,5 +1,9 @@
#include <linux/pm.h>
+#ifdef CONFIG_ARCH_U8500
+extern int usb_device_count;
+#endif
+
/* Functions local to drivers/usb/core/ */
extern int usb_create_sysfs_dev_files(struct usb_device *dev);
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 11c07cb7d33..1cd2c1cf1cb 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -97,6 +97,19 @@ static inline unsigned ecm_bitrate(struct usb_gadget *g)
/* interface descriptor: */
+static struct usb_interface_assoc_descriptor
+ecm_iad_descriptor = {
+ .bLength = sizeof ecm_iad_descriptor,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_COMM,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ /* .iFunction = DYNAMIC */
+};
+
static struct usb_interface_descriptor ecm_control_intf = {
.bLength = sizeof ecm_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -199,6 +212,7 @@ static struct usb_endpoint_descriptor fs_ecm_out_desc = {
static struct usb_descriptor_header *ecm_fs_function[] = {
/* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
@@ -247,6 +261,7 @@ static struct usb_endpoint_descriptor hs_ecm_out_desc = {
static struct usb_descriptor_header *ecm_hs_function[] = {
/* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
@@ -339,6 +354,7 @@ static struct usb_string ecm_string_defs[] = {
[0].s = "CDC Ethernet Control Model (ECM)",
[1].s = NULL /* DYNAMIC */,
[2].s = "CDC Ethernet Data",
+ [3].s = "CDC ECM",
{ } /* end of list */
};
@@ -674,6 +690,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
if (status < 0)
goto fail;
ecm->ctrl_id = status;
+ ecm_iad_descriptor.bFirstInterface = status;
ecm_control_intf.bInterfaceNumber = status;
ecm_union_desc.bMasterInterface0 = status;
@@ -864,6 +881,13 @@ ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
return status;
ecm_string_defs[1].id = status;
ecm_desc.iMACAddress = status;
+
+ /* IAD label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_string_defs[3].id = status;
+ ecm_iad_descriptor.iFunction = status;
}
/* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 1a6f415c0d0..f1d511163fc 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -1373,32 +1373,7 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
limit = 65535; /* Should really be FSG_BUFLEN */
}
- /* No block descriptors */
-
- /*
- * The mode pages, in numerical order. The only page we support
- * is the Caching page.
- */
- if (page_code == 0x08 || all_pages) {
- valid_page = 1;
- buf[0] = 0x08; /* Page code */
- buf[1] = 10; /* Page length */
- memset(buf+2, 0, 10); /* None of the fields are changeable */
-
- if (!changeable_values) {
- buf[2] = 0x04; /* Write cache enable, */
- /* Read cache not disabled */
- /* No cache retention priorities */
- put_unaligned_be16(0xffff, &buf[4]);
- /* Don't disable prefetch */
- /* Minimum prefetch = 0 */
- put_unaligned_be16(0xffff, &buf[8]);
- /* Maximum prefetch */
- put_unaligned_be16(0xffff, &buf[10]);
- /* Maximum prefetch ceiling */
- }
- buf += 12;
- }
+ valid_page = 1;
/*
* Check that a valid page was requested and the mode data length
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 704d1d94f72..f381a604114 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -174,8 +174,8 @@ rndis_iad_descriptor = {
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
- .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
+ .bFunctionProtocol = USB_CDC_ACM_PROTO_VENDOR,
/* .iFunction = DYNAMIC */
};
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 07a03460a59..4c6cfab52fb 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -61,7 +61,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "U8500 and U5500"
- depends on (ARCH_U8500 && AB8500_USB)
+ depends on (ARCH_U8500) || (ARCH_U5500)
endchoice
@@ -108,3 +108,18 @@ config USB_TUSB_OMAP_DMA
help
Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+config USB_UX500_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default USB_MUSB_UX500
+ help
+ Enable DMA transfers on UX500 platforms.
+
+config USB_MUSB_DEBUG
+ depends on USB_MUSB_HDRC
+ bool "Enable debugging messages"
+ default n
+ help
+ This enables musb debugging. To set the logging level use the debug
+ module parameter. Starting at level 3, per-transfer (urb, usb_request,
+ packet, or dma transfer) tracing may kick in.
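The logging level mentioned in the help text is supplied as a module parameter at load time; assuming the core is built as the usual musb_hdrc module, a typical invocation would be along the lines of:

	modprobe musb_hdrc debug=3

(the module name is an assumption here; only the parameter name comes from the help text above).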
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b63ab157010..426cbd86fc1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1103,8 +1103,8 @@ static struct musb_fifo_cfg __initdata mode_4_cfg[] = {
/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg __initdata mode_5_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
@@ -1867,7 +1867,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
status = -ENODEV;
goto fail0;
}
-
/* allocate */
musb = allocate_instance(dev, plat->config, ctrl);
if (!musb) {
@@ -2305,7 +2304,7 @@ static int musb_suspend(struct device *dev)
return 0;
}
-static int musb_resume_noirq(struct device *dev)
+static int musb_resume(struct device *dev)
{
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
@@ -2346,12 +2345,12 @@ static int musb_runtime_resume(struct device *dev)
static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
- .resume_noirq = musb_resume_noirq,
+ .resume = musb_resume,
.runtime_suspend = musb_runtime_suspend,
.runtime_resume = musb_runtime_resume,
};
-#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
+#define MUSB_DEV_PM_OPS NULL
#else
#define MUSB_DEV_PM_OPS NULL
#endif
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 79cb0af779f..04677af00e3 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -46,7 +46,6 @@
#include "musb_core.h"
#include "musb_host.h"
-
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
@@ -108,24 +107,41 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
+ void __iomem *regs = ep->musb->mregs;
u16 csr;
- u16 lastcsr = 0;
- int retries = 1000;
+ u8 addr;
+ int retries = 3000; /* 3ms */
+ /*
+ * NOTE: We are using a hack here because the FIFO-FLUSH
+ * bit is broken in hardware! The hack consists of changing
+ * the TXFUNCADDR to an unused device address and waiting
+ * for any pending USB packets to hit the 3-strikes and your
+	 * for any pending USB packets to hit the 3-strikes-and-you're-out
+	 * rule.
+ addr = musb_readb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR));
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- if (csr != lastcsr)
- dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
- lastcsr = csr;
- csr |= MUSB_TXCSR_FLUSHFIFO;
- musb_writew(epio, MUSB_TXCSR, csr);
+ musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum,
+ MUSB_TXFUNCADDR), 127);
csr = musb_readw(epio, MUSB_TXCSR);
- if (WARN(retries-- < 1,
- "Could not flush host TX%d fifo: csr: %04x\n",
- ep->epnum, csr))
- return;
- mdelay(1);
+ retries--;
+ if (retries == 0) {
+ /* can happen if the USB clocks are OFF */
+ dev_dbg(musb->controller, "Could not flush host TX%d "
+ "fifo: csr=0x%04x\n", ep->epnum, csr);
+ break;
+ }
+ udelay(1);
}
+ /* clear any errors */
+ csr &= ~(MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT);
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ /* restore endpoint address */
+ musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR), addr);
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
@@ -615,12 +631,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
u16 csr;
u8 mode;
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (length > channel->max_len)
length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
- if (length > pkt_size) {
+ if (length >= pkt_size) {
mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
@@ -802,6 +818,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (load_count) {
/* PIO to load FIFO */
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
qh->segsize = load_count;
musb_write_fifo(hw_ep, load_count, buf);
}
@@ -894,6 +912,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count < len)
urb->status = -EOVERFLOW;
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_read_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
@@ -933,6 +953,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
@@ -1134,6 +1156,22 @@ void musb_host_tx(struct musb *musb, u8 epnum)
dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
+ } else if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
+ /* BUSY - can happen during USB transfer cancel */
+
+		/* MUSB_TXCSR_TXPKTRDY indicates that data written to the
+		 * FIFO by DMA has not yet gone out on the USB bus. The DMA
+		 * completion callback does not guarantee that the data has
+		 * left the controller, so if we get here we must wait for
+		 * MUSB_TXCSR_TXPKTRDY to clear before proceeding.
+ */
+ dev_dbg(musb->controller, "TXPKTRDY set. Data transfer ongoing. Wait...\n");
+
+ do {
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ } while ((tx_csr & MUSB_TXCSR_TXPKTRDY) != 0);
+ dev_dbg(musb->controller, "TXPKTRDY Cleared. Continue...\n");
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
@@ -1427,7 +1465,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
size_t xfer_len;
void __iomem *mbase = musb->mregs;
int pipe;
- u16 rx_csr, val;
+ u16 rx_csr, val, restore_csr;
bool iso_err = false;
bool done = false;
u32 status;
@@ -1537,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-#ifndef CONFIG_USB_INVENTRA_DMA
+#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
@@ -1569,7 +1607,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1625,7 +1663,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
@@ -1709,6 +1747,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
*/
val = musb_readw(epio, MUSB_RXCSR);
+
+ /* retain the original value,
+ * which will be used to reset CSR
+ */
+ restore_csr = val;
val &= ~MUSB_RXCSR_H_REQPKT;
if (dma->desired_mode == 0)
@@ -1736,7 +1779,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
c->channel_release(dma);
hw_ep->rx_channel = NULL;
dma = NULL;
- /* REVISIT reset CSR */
+ musb_writew(epio, MUSB_RXCSR, restore_csr);
}
}
#endif /* Mentor DMA */
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index f7e04bf34a1..2b63da5d48a 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -25,9 +25,13 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <mach/usb.h>
#include "musb_core.h"
+#define DEFAULT_DEVCTL 0x81
+static void ux500_musb_set_vbus(struct musb *musb, int is_on);
+
struct ux500_glue {
struct device *dev;
struct platform_device *musb;
@@ -35,17 +39,369 @@ struct ux500_glue {
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
+static struct timer_list notify_timer;
+static struct musb_context_registers context;
+static struct musb *_musb;
+
+void ux500_store_context(struct musb *musb)
+{
+#ifdef CONFIG_PM
+ int i;
+ void __iomem *musb_base;
+ void __iomem *epio;
+
+ if (musb != NULL)
+ _musb = musb;
+ else
+ return;
+
+ musb_base = musb->mregs;
+
+ if (is_host_enabled(musb)) {
+ context.frame = musb_readw(musb_base, MUSB_FRAME);
+ context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
+ }
+ context.power = musb_readb(musb_base, MUSB_POWER);
+ context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+ context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+ context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ context.index = musb_readb(musb_base, MUSB_INDEX);
+ context.devctl = DEFAULT_DEVCTL;
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ context.index_regs[i].txmaxp =
+ musb_readw(epio, MUSB_TXMAXP);
+ context.index_regs[i].txcsr =
+ musb_readw(epio, MUSB_TXCSR);
+ context.index_regs[i].rxmaxp =
+ musb_readw(epio, MUSB_RXMAXP);
+ context.index_regs[i].rxcsr =
+ musb_readw(epio, MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
+ if (is_host_enabled(musb)) {
+ context.index_regs[i].txtype =
+ musb_readb(epio, MUSB_TXTYPE);
+ context.index_regs[i].txinterval =
+ musb_readb(epio, MUSB_TXINTERVAL);
+ context.index_regs[i].rxtype =
+ musb_readb(epio, MUSB_RXTYPE);
+ context.index_regs[i].rxinterval =
+ musb_readb(epio, MUSB_RXINTERVAL);
+
+ context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+ }
+#endif
+}
+void ux500_restore_context(void)
+{
+#ifdef CONFIG_PM
+ int i;
+ struct musb *musb;
+ void __iomem *musb_base;
+ void __iomem *ep_target_regs;
+ void __iomem *epio;
+
+ if (_musb != NULL)
+ musb = _musb;
+ else
+ return;
+
+ musb_base = musb->mregs;
+ if (is_host_enabled(musb)) {
+ musb_writew(musb_base, MUSB_FRAME, context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, context.testmode);
+ musb_write_ulpi_buscontrol(musb->mregs, context.busctl);
+ }
+ musb_writeb(musb_base, MUSB_POWER, context.power);
+ musb_writew(musb_base, MUSB_INTRTXE, context.intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, context.intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, context.devctl);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ musb_writew(epio, MUSB_TXMAXP,
+ context.index_regs[i].txmaxp);
+ musb_writew(epio, MUSB_TXCSR,
+ context.index_regs[i].txcsr);
+ musb_writew(epio, MUSB_RXMAXP,
+ context.index_regs[i].rxmaxp);
+ musb_writew(epio, MUSB_RXCSR,
+ context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ context.index_regs[i].rxfifoadd);
+ }
+
+ if (is_host_enabled(musb)) {
+ musb_writeb(epio, MUSB_TXTYPE,
+ context.index_regs[i].txtype);
+ musb_writeb(epio, MUSB_TXINTERVAL,
+ context.index_regs[i].txinterval);
+ musb_writeb(epio, MUSB_RXTYPE,
+ context.index_regs[i].rxtype);
+			musb_writeb(epio, MUSB_RXINTERVAL,
+				context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ context.index_regs[i].rxhubport);
+ }
+ }
+ musb_writeb(musb_base, MUSB_INDEX, context.index);
+#endif
+}
+
+static void musb_notify_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+
+ u8 devctl;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/* OTG transceiver notifier support */
+static int musb_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct musb *musb = container_of(nb, struct musb, nb);
+
+ switch (event) {
+ case USB_EVENT_ID:
+ dev_dbg(musb->controller, "ID GND\n");
+ if (is_otg_enabled(musb))
+ ux500_musb_set_vbus(musb, 1);
+ break;
+
+ case USB_EVENT_VBUS:
+ dev_dbg(musb->controller, "VBUS Connect\n");
+
+ break;
+
+ case USB_EVENT_NONE:
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
+ if (is_otg_enabled(musb))
+ ux500_musb_set_vbus(musb, 0);
+
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static void ux500_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ int ret = 1;
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ /*
+ * Wait for the musb controller to be configured as an
+ * A device so that VBUS can be enabled.
+ */
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) & MUSB_DEVCTL_BDEVICE) {
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(musb->controller,
+ "configured as A device timeout");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ } else {
+ musb->is_active = 1;
+ musb->xceiv->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ }
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+
+ musb->xceiv->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb->xceiv->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static void ux500_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ otg_state_string(musb->xceiv->state));
+ del_timer(&notify_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&notify_timer))
+ last_timer = timeout;
+ else {
+ dev_dbg(musb->controller, "Longer idle timer "
+ "already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
+ otg_state_string(musb->xceiv->state),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&notify_timer, timeout);
+}
+
+static void ux500_musb_enable(struct musb *musb)
+{
+ ux500_store_context(musb);
+}
static int ux500_musb_init(struct musb *musb)
{
+ int status;
musb->xceiv = otg_get_transceiver();
if (!musb->xceiv) {
pr_err("HS USB OTG: no transceiver configured\n");
return -ENODEV;
}
+ musb->nb.notifier_call = musb_otg_notifications;
+ status = otg_register_notifier(musb->xceiv, &musb->nb);
+
+ if (status < 0) {
+ dev_dbg(musb->controller, "notification register failed\n");
+ goto err1;
+ }
+
+ setup_timer(&notify_timer, musb_notify_idle, (unsigned long) musb);
+
return 0;
+err1:
+ return status;
}
+/**
+ * ux500_musb_exit() - clean up the platform glue.
+ * @musb: struct musb pointer.
+ *
+ * This function releases the reference to the OTG transceiver.
+ */
static int ux500_musb_exit(struct musb *musb)
{
otg_put_transceiver(musb->xceiv);
@@ -56,8 +412,20 @@ static int ux500_musb_exit(struct musb *musb)
static const struct musb_platform_ops ux500_ops = {
.init = ux500_musb_init,
.exit = ux500_musb_exit,
+
+ .set_vbus = ux500_musb_set_vbus,
+ .try_idle = ux500_musb_try_idle,
+
+ .enable = ux500_musb_enable,
};
+/**
+ * ux500_probe() - Allocate the resources.
+ * @pdev: struct platform_device pointer.
+ *
+ * This function allocates the required memory for the
+ * structures and initializes the interrupts.
+ */
static int __init ux500_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
@@ -155,6 +523,13 @@ static int __exit ux500_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
+/**
+ * ux500_suspend() - Handles the platform suspend.
+ * @dev: struct device pointer.
+ *
+ * This function is called when the platform
+ * is about to suspend.
+ */
static int ux500_suspend(struct device *dev)
{
struct ux500_glue *glue = dev_get_drvdata(dev);
@@ -166,6 +541,13 @@ static int ux500_suspend(struct device *dev)
return 0;
}
+/**
+ * ux500_resume() - Handles the platform resume.
+ * @dev: struct device pointer.
+ *
+ * This function is called when the platform
+ * is about to resume.
+ */
static int ux500_resume(struct device *dev)
{
struct ux500_glue *glue = dev_get_drvdata(dev);
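
For orientation (illustration, not part of the patch): ux500_store_context() is wired into the glue's .enable hook above, while ux500_restore_context() is exported so the transceiver driver can replay the MUSB registers after the PHY has been re-powered. A minimal sketch of such a caller, assuming only the two helpers shown in this patch; all other names are hypothetical:

	/* Illustration only -- names other than ux500_restore_context()
	 * are hypothetical. */
	static void example_cable_attach(struct example_xceiv *xc)
	{
		/*
		 * The MUSB register state is lost while the PHY is off, so
		 * replay the saved context before enabling the PHY and
		 * letting the core touch the endpoints again.
		 */
		ux500_restore_context();
		example_phy_enable(xc);
	}
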
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index ef4333f4bbe..ecab2bf997e 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -56,7 +56,13 @@ struct ux500_dma_controller {
dma_addr_t phy_base;
};
-/* Work function invoked from DMA callback to handle tx transfers. */
+/**
+ * ux500_tx_work() - Invoked by the worker thread
+ * @data: worker queue data
+ *
+ * This function is invoked by the worker thread when a DMA transfer
+ * has completed in the transmit direction.
+ */
static void ux500_tx_work(struct work_struct *data)
{
struct ux500_dma_channel *ux500_channel = container_of(data,
@@ -76,7 +82,13 @@ static void ux500_tx_work(struct work_struct *data)
spin_unlock_irqrestore(&musb->lock, flags);
}
-/* Work function invoked from DMA callback to handle rx transfers. */
+/**
+ * ux500_rx_work() - Invoked by the worker thread
+ * @data: worker queue data
+ *
+ * This function is invoked by the worker thread when a DMA transfer
+ * has completed in the receive direction.
+ */
static void ux500_rx_work(struct work_struct *data)
{
struct ux500_dma_channel *ux500_channel = container_of(data,
@@ -96,6 +108,12 @@ static void ux500_rx_work(struct work_struct *data)
spin_unlock_irqrestore(&musb->lock, flags);
}
+/**
+ * ux500_dma_callback() - callback invoked on DMA transfer completion
+ * @private_data: pointer to the DMA channel.
+ *
+ * This callback is invoked when the DMA transfer is completed.
+ */
void ux500_dma_callback(void *private_data)
{
struct dma_channel *channel = (struct dma_channel *)private_data;
@@ -104,6 +122,18 @@ void ux500_dma_callback(void *private_data)
schedule_work(&ux500_channel->channel_work);
}
+/**
+ * ux500_configure_channel() - configures the source and destination addresses
+ * and starts the transfer
+ * @channel: pointer to the DMA channel
+ * @packet_sz: packet size
+ * @mode: DMA mode
+ * @dma_addr: DMA source address for the transmit direction
+ * or DMA destination address for the receive direction
+ * @len: length of the transfer
+ *
+ * This function configures the source and destination addresses for the DMA
+ * operation and initiates the DMA transfer.
+ */
static bool ux500_configure_channel(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
@@ -162,6 +192,15 @@ static bool ux500_configure_channel(struct dma_channel *channel,
return true;
}
+/**
+ * ux500_dma_channel_allocate() - allocates a DMA channel
+ * @c: pointer to the DMA controller
+ * @hw_ep: pointer to the endpoint
+ * @is_tx: transmit or receive direction
+ *
+ * This function allocates the DMA channel and initializes
+ * the channel.
+ */
static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 is_tx)
{
@@ -200,7 +239,13 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
return &(ux500_channel->channel);
}
-
+/**
+ * ux500_dma_channel_release() - releases the DMA channel
+ * @channel: channel to be released
+ *
+ * This function releases the DMA channel.
+ */
static void ux500_dma_channel_release(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
@@ -227,6 +272,16 @@ static int ux500_dma_is_compatible(struct dma_channel *channel,
return true;
}
+/**
+ * ux500_dma_channel_program() - Configures the channel and initiates transfer
+ * @channel: pointer to the DMA channel
+ * @packet_sz: packet size
+ * @mode: DMA mode
+ * @dma_addr: physical address of the memory buffer
+ * @len: length of the transfer
+ *
+ * This function configures the channel and initiates the DMA transfer.
+ */
static int ux500_dma_channel_program(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
@@ -248,6 +303,12 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
return ret;
}
+/**
+ * ux500_dma_channel_abort() - aborts the DMA transfer
+ * @channel: pointer to the DMA channel.
+ *
+ * This function aborts the DMA transfer.
+ */
static int ux500_dma_channel_abort(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
@@ -282,6 +343,12 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
return 0;
}
+/**
+ * ux500_dma_controller_stop() - releases all channels and frees the DMA pipes
+ * @c: pointer to the DMA controller
+ *
+ * This function releases all of the logical channels and frees the DMA pipes.
+ */
static int ux500_dma_controller_stop(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -313,6 +380,15 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
return 0;
}
+
+/**
+ * ux500_dma_controller_start() - creates the logical channel pool and
+ * registers callbacks
+ * @c: pointer to the DMA controller
+ *
+ * This function requests logical channels from the DMA driver, creates the
+ * channels based on the configured event lines, and registers the callbacks
+ * that are invoked when a transfer completes in the transmit or receive
+ * direction.
+ */
static int ux500_dma_controller_start(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -389,6 +465,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
return 0;
}
+/**
+ * dma_controller_destroy() - deallocates the DMA controller
+ * @c: pointer to the DMA controller.
+ *
+ * This function deallocates the DMA controller.
+ */
void dma_controller_destroy(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -397,6 +479,15 @@ void dma_controller_destroy(struct dma_controller *c)
kfree(controller);
}
+/**
+ * dma_controller_create() - creates the DMA controller and initializes
+ * callbacks
+ * @musb: pointer to the mentor core driver data instance
+ * @base: base address of the musb registers.
+ *
+ * This function creates the DMA controller and initializes the callbacks
+ * that are invoked from the Mentor IP core.
+ */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *base)
{
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index c66481ad98d..3db06194f1f 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -122,6 +122,15 @@ config AB8500_USB
This transceiver supports high and full speed devices plus,
in host mode, low speed.
+config AB5500_USB
+ tristate "AB5500 USB Transceiver Driver"
+ depends on AB5500_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver in the AB5500 chip.
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
+
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 566655c5333..90249f46153 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -19,5 +19,6 @@ obj-$(CONFIG_USB_ULPI) += ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o
obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
+obj-$(CONFIG_AB5500_USB) += ab5500-usb.o
fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o
obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o
diff --git a/drivers/usb/otg/ab5500-usb.c b/drivers/usb/otg/ab5500-usb.c
new file mode 100644
index 00000000000..2f3c48a8336
--- /dev/null
+++ b/drivers/usb/otg/ab5500-usb.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Avinash Kumar <avinash.kumar@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/usb.h>
+#include <linux/kernel_stat.h>
+#include <mach/gpio.h>
+
+/* AB5500 USB macros */
+#define AB5500_USB_HOST_ENABLE 0x1
+#define AB5500_USB_DEVICE_ENABLE 0x2
+#define AB5500_MAIN_WATCHDOG_ENABLE 0x1
+#define AB5500_MAIN_WATCHDOG_KICK 0x2
+#define AB5500_MAIN_WATCHDOG_DISABLE 0x0
+#define AB5500_USB_ADP_ENABLE 0x1
+#define AB5500_WATCHDOG_DELAY 10
+#define AB5500_WATCHDOG_DELAY_US 100
+#define AB5500_PHY_DELAY_US 100
+#define AB5500_MAIN_WDOG_CTRL_REG 0x01
+#define AB5500_USB_LINE_STAT_REG 0x80
+#define AB5500_USB_PHY_CTRL_REG 0x8A
+#define AB5500_SYS_CTRL2_BLOCK 0x2
+
+/* UsbLineStatus register bit masks */
+#define AB5500_USB_LINK_STATUS_MASK_V1 0x78
+#define AB5500_USB_LINK_STATUS_MASK_V2 0xF8
+
+#define USB_PROBE_DELAY 1000 /* 1 second */
+
+/* UsbLineStatus register - usb types */
+enum ab8500_usb_link_status {
+ USB_LINK_NOT_CONFIGURED,
+ USB_LINK_STD_HOST_NC,
+ USB_LINK_STD_HOST_C_NS,
+ USB_LINK_STD_HOST_C_S,
+ USB_LINK_HOST_CHG_NM,
+ USB_LINK_HOST_CHG_HS,
+ USB_LINK_HOST_CHG_HS_CHIRP,
+ USB_LINK_DEDICATED_CHG,
+ USB_LINK_ACA_RID_A,
+ USB_LINK_ACA_RID_B,
+ USB_LINK_ACA_RID_C_NM,
+ USB_LINK_ACA_RID_C_HS,
+ USB_LINK_ACA_RID_C_HS_CHIRP,
+ USB_LINK_HM_IDGND,
+ USB_LINK_OTG_HOST_NO_CURRENT,
+ USB_LINK_NOT_VALID_LINK,
+ USB_LINK_HM_IDGND_V2 = 18,
+};
+
+/**
+ * ab5500_usb_mode - Different states of the AB5500 USB chip
+ *
+ * Used by the USB cable plug-in state machine.
+ */
+enum ab5500_usb_mode {
+ USB_IDLE,
+ USB_DEVICE,
+ USB_HOST,
+ USB_DEDICATED_CHG,
+};
+
+struct ab5500_usb {
+ struct otg_transceiver otg;
+ struct device *dev;
+ int irq_num_id_fall;
+ int irq_num_vbus_rise;
+ int irq_num_vbus_fall;
+ int irq_num_link_status;
+ unsigned vbus_draw;
+ struct delayed_work dwork;
+ struct work_struct phy_dis_work;
+ unsigned long link_status_wait;
+ int rev;
+ int usb_cs_gpio;
+ enum ab5500_usb_mode mode;
+ struct clk *sysclk;
+ struct regulator *v_ape;
+ struct abx500_usbgpio_platform_data *usb_gpio;
+ struct delayed_work work_usb_workaround;
+ bool phy_enabled;
+};
+
+static int ab5500_usb_irq_setup(struct platform_device *pdev,
+ struct ab5500_usb *ab);
+static int ab5500_usb_boot_detect(struct ab5500_usb *ab);
+static int ab5500_usb_link_status_update(struct ab5500_usb *ab);
+
+static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host);
+
+static inline struct ab5500_usb *xceiv_to_ab(struct otg_transceiver *x)
+{
+ return container_of(x, struct ab5500_usb, otg);
+}
+
+/**
+ * ab5500_usb_wd_workaround() - Kick the watchdog timer
+ * @ab: pointer to the ab5500_usb structure
+ *
+ * This function is used to kick the main watchdog timer.
+ */
+static void ab5500_usb_wd_workaround(struct ab5500_usb *ab)
+{
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ AB5500_MAIN_WATCHDOG_ENABLE);
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ (AB5500_MAIN_WATCHDOG_ENABLE
+ | AB5500_MAIN_WATCHDOG_KICK));
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ AB5500_MAIN_WATCHDOG_DISABLE);
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+}
+
+static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ /* Workaround for spurious interrupt, to be checked with the hardware team */
+ if (ab->phy_enabled == true)
+ return;
+ ab->phy_enabled = true;
+ bit = sel_host ? AB5500_USB_HOST_ENABLE :
+ AB5500_USB_DEVICE_ENABLE;
+
+ ab->usb_gpio->enable();
+ clk_enable(ab->sysclk);
+ regulator_enable(ab->v_ape);
+
+ if (!sel_host) {
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+ }
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ bit, bit);
+}
+
+static void ab5500_usb_phy_disable(struct ab5500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ /* Workaround for spurious interrupt, to be checked with the hardware team */
+ if (ab->phy_enabled == false)
+ return;
+ ab->phy_enabled = false;
+ bit = sel_host ? AB5500_USB_HOST_ENABLE :
+ AB5500_USB_DEVICE_ENABLE;
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ bit, 0);
+ /* Needed to disable the phy.*/
+ ab5500_usb_wd_workaround(ab);
+ clk_disable(ab->sysclk);
+ regulator_disable(ab->v_ape);
+ ab->usb_gpio->disable();
+}
+
+#define ab5500_usb_peri_phy_en(ab) ab5500_usb_phy_enable(ab, false)
+#define ab5500_usb_peri_phy_dis(ab) ab5500_usb_phy_disable(ab, false)
+#define ab5500_usb_host_phy_en(ab) ab5500_usb_phy_enable(ab, true)
+#define ab5500_usb_host_phy_dis(ab) ab5500_usb_phy_disable(ab, true)
+
+/* Called from the IRQ and delayed-work handlers to process a link status update */
+static int ab5500_usb_link_status_update(struct ab5500_usb *ab)
+{
+ u8 val = 0;
+ int ret = 0;
+ int gpioval = 0;
+ enum ab8500_usb_link_status lsts;
+ enum usb_xceiv_events event = USB_IDLE;
+
+ (void)abx500_get_register_interruptible(ab->dev,
+ AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &val);
+
+ if (ab->rev == AB5500_2_0)
+ lsts = (val & AB5500_USB_LINK_STATUS_MASK_V2) >> 3;
+ else
+ lsts = (val & AB5500_USB_LINK_STATUS_MASK_V1) >> 3;
+
+ switch (lsts) {
+
+ case USB_LINK_STD_HOST_NC:
+ case USB_LINK_STD_HOST_C_NS:
+ case USB_LINK_STD_HOST_C_S:
+ case USB_LINK_HOST_CHG_NM:
+ case USB_LINK_HOST_CHG_HS:
+ case USB_LINK_HOST_CHG_HS_CHIRP:
+ break;
+
+ case USB_LINK_HM_IDGND:
+ if (ab->rev == AB5500_2_0)
+ break;
+
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, gpioval);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ ab5500_usb_host_phy_en(ab);
+
+ ab->otg.default_a = true;
+ event = USB_EVENT_ID;
+
+ break;
+
+ case USB_LINK_HM_IDGND_V2:
+ if (!(ab->rev == AB5500_2_0))
+ break;
+
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, gpioval);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ ab5500_usb_host_phy_en(ab);
+
+ ab->otg.default_a = true;
+ event = USB_EVENT_ID;
+
+ break;
+ default:
+ break;
+ }
+
+ atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw);
+
+ return 0;
+}
+
+static void ab5500_usb_delayed_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ab5500_usb *ab = container_of(dwork, struct ab5500_usb, dwork);
+
+ ab5500_usb_link_status_update(ab);
+}
+
+/**
+ * ab5500_usb_link_status_irq() - IRQ handler for USB link status updates
+ * @irq: interrupt number
+ * @data: pointer to the ab5500_usb structure
+ *
+ * Invoked when the USB link status register has been updated.
+ */
+static irqreturn_t ab5500_usb_link_status_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ ab5500_usb_link_status_update(ab);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ab5500_usb_device_insert_irq(int irq, void *data)
+{
+ int ret = 0, val = 1;
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+
+ enum usb_xceiv_events event;
+
+ ab->mode = USB_DEVICE;
+
+ ab5500_usb_peri_phy_en(ab);
+
+ /* enable usb chip Select */
+ event = USB_EVENT_VBUS;
+ ret = gpio_direction_output(ab->usb_cs_gpio, val);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_usb_device_disconnect_irq() - handle removal of the device cable
+ * @irq: interrupt number
+ * @data: pointer to the ab5500_usb structure
+ *
+ * This handler removes the voltage and disables the PHY used for
+ * USB device mode.
+ */
+static irqreturn_t ab5500_usb_device_disconnect_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ /* disable usb chip Select */
+ gpio_set_value(ab->usb_cs_gpio, 0);
+ ab5500_usb_peri_phy_dis(ab);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_usb_host_disconnect_irq() - handle removal of the host cable
+ * @irq: interrupt number
+ * @data: pointer to the ab5500_usb structure
+ *
+ * This handler disables the chip select and the PHY used for USB host mode.
+ */
+static irqreturn_t ab5500_usb_host_disconnect_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ /* disable usb chip Select */
+ gpio_set_value(ab->usb_cs_gpio, 0);
+ ab5500_usb_host_phy_dis(ab);
+ return IRQ_HANDLED;
+}
+
+static void ab5500_usb_irq_free(struct ab5500_usb *ab)
+{
+ if (ab->irq_num_id_fall)
+ free_irq(ab->irq_num_id_fall, ab);
+
+ if (ab->irq_num_vbus_rise)
+ free_irq(ab->irq_num_vbus_rise, ab);
+
+ if (ab->irq_num_vbus_fall)
+ free_irq(ab->irq_num_vbus_fall, ab);
+
+ if (ab->irq_num_link_status)
+ free_irq(ab->irq_num_link_status, ab);
+}
+
+/**
+ * ab5500_usb_irq_setup() - register USB interrupt handlers for ab5500
+ * @pdev: pointer to the platform device
+ * @ab: pointer to the ab5500_usb structure
+ *
+ * This function requests the USB-related interrupts and registers
+ * their handlers.
+ */
+static int ab5500_usb_irq_setup(struct platform_device *pdev,
+ struct ab5500_usb *ab)
+{
+ int ret = 0;
+ int irq, err;
+
+ if (!ab->dev)
+ return -EINVAL;
+
+ irq = platform_get_irq_byname(pdev, "usb_idgnd_f");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ID fall irq not found\n");
+ err = irq;
+ goto irq_fail;
+ }
+ ab->irq_num_id_fall = irq;
+
+ irq = platform_get_irq_byname(pdev, "VBUS_F");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "VBUS fall irq not found\n");
+ err = irq;
+ goto irq_fail;
+
+ }
+ ab->irq_num_vbus_fall = irq;
+
+ irq = platform_get_irq_byname(pdev, "VBUS_R");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "VBUS raise irq not found\n");
+ err = irq;
+ goto irq_fail;
+
+ }
+ ab->irq_num_vbus_rise = irq;
+
+ irq = platform_get_irq_byname(pdev, "Link_Update");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Link Update irq not found\n");
+ err = irq;
+ goto irq_fail;
+ }
+ ab->irq_num_link_status = irq;
+
+ ret = request_threaded_irq(ab->irq_num_link_status,
+ NULL, ab5500_usb_link_status_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-link-status-update", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb charge"
+ " detect done\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ret = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
+ ab5500_usb_device_insert_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-vbus-rise", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb ab->dev"
+ " insertion\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ret = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
+ ab5500_usb_device_disconnect_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-vbus-fall", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb ab->dev"
+ " removal\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ret = request_threaded_irq(ab->irq_num_id_fall, NULL,
+ ab5500_usb_host_disconnect_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-id-fall", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb host"
+ " removal\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ab5500_usb_wd_workaround(ab);
+ return 0;
+
+irq_fail:
+ ab5500_usb_irq_free(ab);
+ return err;
+}
+
+/**
+ * ab5500_usb_boot_detect() - detect the USB cable during boot time
+ * @ab: pointer to the ab5500_usb structure
+ *
+ * This function is used to detect the USB cable during boot time.
+ */
+static int ab5500_usb_boot_detect(struct ab5500_usb *ab)
+{
+ int ret;
+ int val = 1;
+ int usb_status = 0;
+ int gpioval = 0;
+ enum ab8500_usb_link_status lsts;
+ if (!ab->dev)
+ return -EINVAL;
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_DEVICE_ENABLE,
+ AB5500_USB_DEVICE_ENABLE);
+
+ udelay(AB5500_PHY_DELAY_US);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_DEVICE_ENABLE, 0);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_HOST_ENABLE,
+ AB5500_USB_HOST_ENABLE);
+
+ udelay(AB5500_PHY_DELAY_US);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_HOST_ENABLE, 0);
+
+ (void)abx500_get_register_interruptible(ab->dev,
+ AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &usb_status);
+
+ if (ab->rev == AB5500_2_0)
+ lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V2) >> 3;
+ else
+ lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V1) >> 3;
+
+ switch (lsts) {
+
+ case USB_LINK_STD_HOST_NC:
+ case USB_LINK_STD_HOST_C_NS:
+ case USB_LINK_STD_HOST_C_S:
+ case USB_LINK_HOST_CHG_NM:
+ case USB_LINK_HOST_CHG_HS:
+ case USB_LINK_HOST_CHG_HS_CHIRP:
+
+ ab5500_usb_peri_phy_en(ab);
+
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, val);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ break;
+
+ case USB_LINK_HM_IDGND:
+ case USB_LINK_HM_IDGND_V2:
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, gpioval);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+ ab5500_usb_host_phy_en(ab);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ab5500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ ab->vbus_draw = mA;
+
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ USB_EVENT_VBUS, &ab->vbus_draw);
+ return 0;
+}
+
+static int ab5500_usb_set_suspend(struct otg_transceiver *x, int suspend)
+{
+ /* TODO */
+ return 0;
+}
+
+static int ab5500_usb_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ /* Some drivers call this function in atomic context.
+ * Do not update ab5500 registers directly till this
+ * is fixed.
+ */
+
+ if (!host) {
+ ab->otg.host = NULL;
+ schedule_work(&ab->phy_dis_work);
+ } else {
+ ab->otg.host = host;
+ }
+
+ return 0;
+}
+
+static int ab5500_usb_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ /* Some drivers call this function in atomic context.
+ * Do not update ab5500 registers directly till this
+ * is fixed.
+ */
+
+ if (!gadget) {
+ ab->otg.gadget = NULL;
+ schedule_work(&ab->phy_dis_work);
+ } else {
+ ab->otg.gadget = gadget;
+ }
+
+ return 0;
+}
+
+static int __devinit ab5500_usb_probe(struct platform_device *pdev)
+{
+ struct ab5500_usb *ab;
+ struct abx500_usbgpio_platform_data *usb_pdata =
+ pdev->dev.platform_data;
+ int err;
+ int ret = -1;
+ ab = kzalloc(sizeof *ab, GFP_KERNEL);
+ if (!ab)
+ return -ENOMEM;
+
+ ab->dev = &pdev->dev;
+ ab->otg.dev = ab->dev;
+ ab->otg.label = "ab5500";
+ ab->otg.state = OTG_STATE_B_IDLE;
+ ab->otg.set_host = ab5500_usb_set_host;
+ ab->otg.set_peripheral = ab5500_usb_set_peripheral;
+ ab->otg.set_suspend = ab5500_usb_set_suspend;
+ ab->otg.set_power = ab5500_usb_set_power;
+ ab->usb_gpio = usb_pdata;
+ ab->mode = USB_IDLE;
+
+ platform_set_drvdata(pdev, ab);
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
+
+ /* v1: Wait for link status to become stable.
+ * all: Updates from set_host and set_peripheral as they are atomic.
+ */
+ INIT_DELAYED_WORK(&ab->dwork, ab5500_usb_delayed_work);
+
+ err = otg_set_transceiver(&ab->otg);
+ if (err)
+ dev_err(&pdev->dev, "Can't register transceiver\n");
+
+ ab->usb_cs_gpio = ab->usb_gpio->usb_cs;
+
+ ab->rev = abx500_get_chip_id(ab->dev);
+
+ ab->sysclk = clk_get(ab->dev, "sysclk");
+ if (IS_ERR(ab->sysclk)) {
+ ret = PTR_ERR(ab->sysclk);
+ ab->sysclk = NULL;
+ return ret;
+ }
+
+ ab->v_ape = regulator_get(ab->dev, "v-ape");
+ if (IS_ERR(ab->v_ape)) {
+ dev_err(ab->dev, "Could not get v-ape supply\n");
+
+ return PTR_ERR(ab->v_ape);
+ }
+
+ ab5500_usb_irq_setup(pdev, ab);
+
+ ret = gpio_request(ab->usb_cs_gpio, "usb-cs");
+ if (ret < 0)
+ dev_err(&pdev->dev, "usb gpio request fail\n");
+
+ /* Acquire GPIO alternate config struct for USB */
+ err = ab->usb_gpio->get(ab->dev);
+ if (err < 0)
+ goto fail1;
+
+ err = ab5500_usb_boot_detect(ab);
+ if (err < 0)
+ goto fail1;
+
+ return 0;
+
+fail1:
+ ab5500_usb_irq_free(ab);
+ kfree(ab);
+ return err;
+}
+
+static int __devexit ab5500_usb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ab5500_usb_driver = {
+ .driver = {
+ .name = "ab5500-usb",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_usb_probe,
+ .remove = __devexit_p(ab5500_usb_remove),
+};
+
+static int __init ab5500_usb_init(void)
+{
+ return platform_driver_register(&ab5500_usb_driver);
+}
+subsys_initcall(ab5500_usb_init);
+
+static void __exit ab5500_usb_exit(void)
+{
+ platform_driver_unregister(&ab5500_usb_driver);
+}
+module_exit(ab5500_usb_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
index 07ccea9ada4..881729da89c 100644
--- a/drivers/usb/otg/ab8500-usb.c
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -29,24 +29,55 @@
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/ab8500.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/io.h>
+
+#include <mach/usb.h>
#define AB8500_MAIN_WD_CTRL_REG 0x01
#define AB8500_USB_LINE_STAT_REG 0x80
#define AB8500_USB_PHY_CTRL_REG 0x8A
+#define AB8500_VBUS_CTRL_REG 0x82
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE20_REG 0x13
+#define AB8500_SRC_INT_USB_HOST 0x04
+#define AB8500_SRC_INT_USB_DEVICE 0x80
#define AB8500_BIT_OTG_STAT_ID (1 << 0)
#define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
#define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
#define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
#define AB8500_BIT_WD_CTRL_KICK (1 << 1)
+#define AB8500_BIT_VBUS_ENABLE (1 << 0)
#define AB8500_V1x_LINK_STAT_WAIT (HZ/10)
#define AB8500_WD_KICK_DELAY_US 100 /* usec */
#define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
+#define AB8500_V20_31952_DISABLE_DELAY_US 100 /* usec */
#define AB8500_WD_V10_DISABLE_DELAY_MS 100 /* ms */
+/* Registers in bank 0x11 */
+#define AB8500_BANK12_ACCESS 0x00
+
+/* Registers in bank 0x12 */
+#define AB8500_USB_PHY_TUNE1 0x05
+#define AB8500_USB_PHY_TUNE2 0x06
+#define AB8500_USB_PHY_TUNE3 0x07
+
+
+#define USB_PROBE_DELAY 1000 /* 1 second */
+#define USB_LIMIT (200) /* If we have more than 200 irqs per second */
+
+#define PUBLIC_ID_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0x0FC0)
+#define MAX_USB_SERIAL_NUMBER_LEN 31
+
/* Usb line status register */
enum ab8500_usb_link_status {
USB_LINK_NOT_CONFIGURED = 0,
@@ -67,6 +98,13 @@ enum ab8500_usb_link_status {
USB_LINK_NOT_VALID_LINK
};
+enum ab8500_usb_mode {
+ USB_IDLE = 0,
+ USB_PERIPHERAL,
+ USB_HOST,
+ USB_DEDICATED_CHG
+};
+
struct ab8500_usb {
struct otg_transceiver otg;
struct device *dev;
@@ -80,6 +118,14 @@ struct ab8500_usb {
struct work_struct phy_dis_work;
unsigned long link_status_wait;
int rev;
+ enum ab8500_usb_mode mode;
+ struct clk *sysclk;
+ struct regulator *v_ape;
+ struct regulator *v_musb;
+ struct regulator *v_ulpi;
+ struct abx500_usbgpio_platform_data *usb_gpio;
+ struct delayed_work work_usb_workaround;
+ struct kobject *serial_number_kobj;
};
static inline struct ab8500_usb *xceiv_to_ab(struct otg_transceiver *x)
@@ -102,10 +148,8 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
(AB8500_BIT_WD_CTRL_ENABLE
| AB8500_BIT_WD_CTRL_KICK));
- if (ab->rev > 0x10) /* v1.1 v2.0 */
+ if (ab->rev > 0x10) /* v2.0 v3.0 */
udelay(AB8500_WD_V11_DISABLE_DELAY_US);
- else /* v1.0 */
- msleep(AB8500_WD_V10_DISABLE_DELAY_MS);
abx500_set_register_interruptible(ab->dev,
AB8500_SYS_CTRL2_BLOCK,
@@ -113,46 +157,153 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
0);
}
-static void ab8500_usb_phy_ctrl(struct ab8500_usb *ab, bool sel_host,
+static void ab8500_usb_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+ struct delayed_work *work_usb_workaround = to_delayed_work(work);
+ struct ab8500_usb *ab = container_of(work_usb_workaround,
+ struct ab8500_usb, work_usb_workaround);
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > USB_LIMIT)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 125);
+ else
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 25);
+
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+}
+
+static void ab8500_usb_regulator_ctrl(struct ab8500_usb *ab, bool sel_host,
bool enable)
{
- u8 ctrl_reg;
- abx500_get_register_interruptible(ab->dev,
+ int ret = 0, volt = 0;
+
+ if (enable) {
+ regulator_enable(ab->v_ape);
+ if (ab->rev >= 0x30) {
+ ret = regulator_set_voltage(ab->v_ulpi,
+ 1300000, 1350000);
+ if (ret < 0)
+ dev_err(ab->dev, "Failed to set the Vintcore"
+ " to 1.3V, ret=%d\n", ret);
+ ret = regulator_set_optimum_mode(ab->v_ulpi,
+ 28000);
+ if (ret < 0)
+ dev_err(ab->dev, "Failed to set optimum mode"
+ " (ret=%d)\n", ret);
+
+ }
+ regulator_enable(ab->v_ulpi);
+ if (ab->rev >= 0x30) {
+ volt = regulator_get_voltage(ab->v_ulpi);
+ if ((volt != 1300000) && (volt != 1350000))
+ dev_err(ab->dev, "Vintcore is not"
+ " set to 1.3V"
+ " volt=%d\n", volt);
+ }
+ regulator_enable(ab->v_musb);
+
+ } else {
+ regulator_disable(ab->v_musb);
+ regulator_disable(ab->v_ulpi);
+ regulator_disable(ab->v_ape);
+ }
+}
+
+static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
+ AB8500_BIT_PHY_CTRL_DEVICE_EN;
+
+ ab->usb_gpio->enable();
+ clk_enable(ab->sysclk);
+
+ ab8500_usb_regulator_ctrl(ab, sel_host, true);
+
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 100);
+ if (!sel_host) {
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+ }
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB,
AB8500_USB_PHY_CTRL_REG,
- &ctrl_reg);
- if (sel_host) {
- if (enable)
- ctrl_reg |= AB8500_BIT_PHY_CTRL_HOST_EN;
- else
- ctrl_reg &= ~AB8500_BIT_PHY_CTRL_HOST_EN;
- } else {
- if (enable)
- ctrl_reg |= AB8500_BIT_PHY_CTRL_DEVICE_EN;
- else
- ctrl_reg &= ~AB8500_BIT_PHY_CTRL_DEVICE_EN;
+ bit,
+ bit);
+
+}
+
+static void ab8500_usb_wd_linkstatus(struct ab8500_usb *ab, u8 bit)
+{
+ /* Workaround for v2.0 bug #31952 */
+ if (ab->rev == 0x20) {
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ bit,
+ bit);
+ udelay(AB8500_V20_31952_DISABLE_DELAY_US);
}
+}
- abx500_set_register_interruptible(ab->dev,
+static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
+ AB8500_BIT_PHY_CTRL_DEVICE_EN;
+
+ ab8500_usb_wd_linkstatus(ab, bit);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB,
AB8500_USB_PHY_CTRL_REG,
- ctrl_reg);
+ bit,
+ 0);
+
+ /* Needed to disable the phy.*/
+ ab8500_usb_wd_workaround(ab);
+
+ clk_disable(ab->sysclk);
+
+ ab8500_usb_regulator_ctrl(ab, sel_host, false);
- /* Needed to enable the phy.*/
- if (enable)
- ab8500_usb_wd_workaround(ab);
+ ab->usb_gpio->disable();
+
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 50);
+
+ if (!sel_host) {
+
+ cancel_delayed_work_sync(&ab->work_usb_workaround);
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 25);
+ }
}
-#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_ctrl(ab, true, true)
-#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_ctrl(ab, true, false)
-#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_ctrl(ab, false, true)
-#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_ctrl(ab, false, false)
+#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_enable(ab, true)
+#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_disable(ab, true)
+#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_enable(ab, false)
+#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_disable(ab, false)
static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
{
u8 reg;
enum ab8500_usb_link_status lsts;
- void *v = NULL;
enum usb_xceiv_events event;
abx500_get_register_interruptible(ab->dev,
@@ -166,10 +317,12 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
case USB_LINK_NOT_CONFIGURED:
case USB_LINK_RESERVED:
case USB_LINK_NOT_VALID_LINK:
- /* TODO: Disable regulators. */
- ab8500_usb_host_phy_dis(ab);
- ab8500_usb_peri_phy_dis(ab);
- ab->otg.state = OTG_STATE_B_IDLE;
+ case USB_LINK_ACA_RID_B:
+ if (ab->mode == USB_HOST)
+ ab8500_usb_host_phy_dis(ab);
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+ ab->mode = USB_IDLE;
ab->otg.default_a = false;
ab->vbus_draw = 0;
event = USB_EVENT_NONE;
@@ -181,74 +334,90 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
case USB_LINK_HOST_CHG_NM:
case USB_LINK_HOST_CHG_HS:
case USB_LINK_HOST_CHG_HS_CHIRP:
- if (ab->otg.gadget) {
- /* TODO: Enable regulators. */
+ case USB_LINK_ACA_RID_C_NM:
+ case USB_LINK_ACA_RID_C_HS:
+ case USB_LINK_ACA_RID_C_HS_CHIRP:
+ if (ab->mode == USB_HOST) {
+ ab->mode = USB_PERIPHERAL;
+ ab8500_usb_host_phy_dis(ab);
+ ux500_restore_context();
+ ab8500_usb_peri_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_PERIPHERAL;
+ ux500_restore_context();
ab8500_usb_peri_phy_en(ab);
- v = ab->otg.gadget;
}
event = USB_EVENT_VBUS;
break;
case USB_LINK_HM_IDGND:
- if (ab->otg.host) {
- /* TODO: Enable regulators. */
+ case USB_LINK_ACA_RID_A:
+ if (ab->mode == USB_PERIPHERAL) {
+ ab->mode = USB_HOST;
+ ab8500_usb_peri_phy_dis(ab);
+ ux500_restore_context();
+ ab8500_usb_host_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_HOST;
+ ux500_restore_context();
ab8500_usb_host_phy_en(ab);
- v = ab->otg.host;
}
- ab->otg.state = OTG_STATE_A_IDLE;
ab->otg.default_a = true;
event = USB_EVENT_ID;
break;
- case USB_LINK_ACA_RID_A:
- case USB_LINK_ACA_RID_B:
- /* TODO */
- case USB_LINK_ACA_RID_C_NM:
- case USB_LINK_ACA_RID_C_HS:
- case USB_LINK_ACA_RID_C_HS_CHIRP:
case USB_LINK_DEDICATED_CHG:
/* TODO: vbus_draw */
+ ab->mode = USB_DEDICATED_CHG;
event = USB_EVENT_CHARGER;
break;
}
- atomic_notifier_call_chain(&ab->otg.notifier, event, v);
+ atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw);
return 0;
}
static void ab8500_usb_delayed_work(struct work_struct *work)
{
- struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
- dwork.work);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ab8500_usb *ab = container_of(dwork, struct ab8500_usb, dwork);
ab8500_usb_link_status_update(ab);
}
-static irqreturn_t ab8500_usb_v1x_common_irq(int irq, void *data)
-{
- struct ab8500_usb *ab = (struct ab8500_usb *) data;
-
- /* Wait for link status to become stable. */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ab8500_usb_v1x_vbus_fall_irq(int irq, void *data)
+static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
+ enum usb_xceiv_events event;
/* Link status will not be updated till phy is disabled. */
- ab8500_usb_peri_phy_dis(ab);
-
- /* Wait for link status to become stable. */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+ if (ab->mode == USB_HOST) {
+ event = USB_EVENT_NONE;
+ ab->otg.default_a = false;
+ ab->vbus_draw = 0;
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ event, &ab->vbus_draw);
+ ab8500_usb_host_phy_dis(ab);
+ }
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+ else if (ab->mode == USB_DEDICATED_CHG && ab->rev == 0x20) {
+ ab8500_usb_wd_linkstatus(ab, AB8500_BIT_PHY_CTRL_DEVICE_EN);
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ 0);
+ }
+ ab->mode = USB_IDLE;
return IRQ_HANDLED;
}
-static irqreturn_t ab8500_usb_v20_irq(int irq, void *data)
+static irqreturn_t ab8500_usb_v20_link_status_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
@@ -267,6 +436,19 @@ static void ab8500_usb_phy_disable_work(struct work_struct *work)
if (!ab->otg.gadget)
ab8500_usb_peri_phy_dis(ab);
+
+}
+
+static unsigned ab8500_eyediagram_workaround(struct ab8500_usb *ab, unsigned mA)
+{
+ /* AB8500 v2 has eye diagram issues when drawing more
+ * than 100 mA from VBUS, so limit the charging current
+ * to 100 mA in the standard host case.
+ */
+ if ((ab->rev < 0x30) && (mA > 100))
+ mA = 100;
+
+ return mA;
}
static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
@@ -278,18 +460,15 @@ static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
ab = xceiv_to_ab(otg);
+ mA = ab8500_eyediagram_workaround(ab, mA);
+
ab->vbus_draw = mA;
- if (mA)
- atomic_notifier_call_chain(&ab->otg.notifier,
- USB_EVENT_ENUMERATED, ab->otg.gadget);
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ USB_EVENT_VBUS, &ab->vbus_draw);
return 0;
}
-/* TODO: Implement some way for charging or other drivers to read
- * ab->vbus_draw.
- */
-
static int ab8500_usb_set_suspend(struct otg_transceiver *x, int suspend)
{
/* TODO */
@@ -306,25 +485,13 @@ static int ab8500_usb_set_peripheral(struct otg_transceiver *otg,
ab = xceiv_to_ab(otg);
+ ab->otg.gadget = gadget;
/* Some drivers call this function in atomic context.
* Do not update ab8500 registers directly till this
* is fixed.
*/
-
- if (!gadget) {
- /* TODO: Disable regulators. */
- ab->otg.gadget = NULL;
+ if (!gadget)
schedule_work(&ab->phy_dis_work);
- } else {
- ab->otg.gadget = gadget;
- ab->otg.state = OTG_STATE_B_IDLE;
-
- /* Phy will not be enabled if cable is already
- * plugged-in. Schedule to enable phy.
- * Use same delay to avoid any race condition.
- */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
- }
return 0;
}
@@ -339,148 +506,246 @@ static int ab8500_usb_set_host(struct otg_transceiver *otg,
ab = xceiv_to_ab(otg);
+ ab->otg.host = host;
+
/* Some drivers call this function in atomic context.
* Do not update ab8500 registers directly till this
* is fixed.
*/
-
- if (!host) {
- /* TODO: Disable regulators. */
- ab->otg.host = NULL;
+ if (!host)
schedule_work(&ab->phy_dis_work);
- } else {
- ab->otg.host = host;
- /* Phy will not be enabled if cable is already
- * plugged-in. Schedule to enable phy.
- * Use same delay to avoid any race condition.
- */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+
+ return 0;
+}
+/**
+ * ab8500_usb_boot_detect() - detect the USB cable during boot time
+ * @ab: pointer to the ab8500_usb structure
+ *
+ * This function is used to detect the USB cable during boot time.
+ */
+static int ab8500_usb_boot_detect(struct ab8500_usb *ab)
+{
+ /* Disabling PHY before selective enable or disable */
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN);
+
+ udelay(100);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ 0);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_HOST_EN,
+ AB8500_BIT_PHY_CTRL_HOST_EN);
+
+ udelay(100);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_HOST_EN,
+ 0);
+
+ ab8500_usb_link_status_update(ab);
+
+ return 0;
+}
+
+static void ab8500_usb_regulator_put(struct ab8500_usb *ab)
+{
+
+ if (ab->v_ape)
+ regulator_put(ab->v_ape);
+
+ if (ab->v_ulpi)
+ regulator_put(ab->v_ulpi);
+
+ if (ab->v_musb)
+ regulator_put(ab->v_musb);
+}
+
+static int ab8500_usb_regulator_get(struct ab8500_usb *ab)
+{
+ int err;
+
+ ab->v_ape = regulator_get(ab->dev, "v-ape");
+ if (IS_ERR(ab->v_ape)) {
+ dev_err(ab->dev, "Could not get v-ape supply\n");
+ err = PTR_ERR(ab->v_ape);
+ goto reg_error;
+ }
+
+ ab->v_ulpi = regulator_get(ab->dev, "vddulpivio18");
+ if (IS_ERR(ab->v_ulpi)) {
+ dev_err(ab->dev, "Could not get vddulpivio18 supply\n");
+ err = PTR_ERR(ab->v_ulpi);
+ goto reg_error;
+ }
+
+ ab->v_musb = regulator_get(ab->dev, "musb_1v8");
+ if (IS_ERR(ab->v_musb)) {
+ dev_err(ab->dev, "Could not get musb_1v8 supply\n");
+ err = PTR_ERR(ab->v_musb);
+ goto reg_error;
}
return 0;
+
+reg_error:
+ ab8500_usb_regulator_put(ab);
+ return err;
}
static void ab8500_usb_irq_free(struct ab8500_usb *ab)
{
- if (ab->rev < 0x20) {
+ if (ab->irq_num_id_rise)
free_irq(ab->irq_num_id_rise, ab);
+
+ if (ab->irq_num_id_fall)
free_irq(ab->irq_num_id_fall, ab);
+
+ if (ab->irq_num_vbus_rise)
free_irq(ab->irq_num_vbus_rise, ab);
+
+ if (ab->irq_num_vbus_fall)
free_irq(ab->irq_num_vbus_fall, ab);
- } else {
+
+ if (ab->irq_num_link_status)
free_irq(ab->irq_num_link_status, ab);
- }
}
-static int ab8500_usb_v1x_res_setup(struct platform_device *pdev,
+static int ab8500_usb_irq_setup(struct platform_device *pdev,
struct ab8500_usb *ab)
{
int err;
+ int irq;
+
+ if (ab->rev > 0x10) { /* 0x20 0x30 */
+ irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
+ if (irq < 0) {
+ err = irq;
+ dev_err(&pdev->dev, "Link status irq not found\n");
+ goto irq_fail;
+ }
- ab->irq_num_id_rise = platform_get_irq_byname(pdev, "ID_WAKEUP_R");
- if (ab->irq_num_id_rise < 0) {
- dev_err(&pdev->dev, "ID rise irq not found\n");
- return ab->irq_num_id_rise;
- }
- err = request_threaded_irq(ab->irq_num_id_rise, NULL,
- ab8500_usb_v1x_common_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-id-rise", ab);
- if (err < 0) {
- dev_err(ab->dev, "request_irq failed for ID rise irq\n");
- goto fail0;
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_v20_link_status_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-link-status", ab);
+ if (err < 0) {
+ dev_err(ab->dev,
+ "request_irq failed for link status irq\n");
+ return err;
+ }
+ ab->irq_num_link_status = irq;
}
- ab->irq_num_id_fall = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
- if (ab->irq_num_id_fall < 0) {
+ irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
+ if (irq < 0) {
+ err = irq;
dev_err(&pdev->dev, "ID fall irq not found\n");
- return ab->irq_num_id_fall;
+ goto irq_fail;
}
- err = request_threaded_irq(ab->irq_num_id_fall, NULL,
- ab8500_usb_v1x_common_irq,
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED,
"usb-id-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for ID fall irq\n");
- goto fail1;
+ goto irq_fail;
}
+ ab->irq_num_id_fall = irq;
- ab->irq_num_vbus_rise = platform_get_irq_byname(pdev, "VBUS_DET_R");
- if (ab->irq_num_vbus_rise < 0) {
- dev_err(&pdev->dev, "VBUS rise irq not found\n");
- return ab->irq_num_vbus_rise;
- }
- err = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
- ab8500_usb_v1x_common_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-vbus-rise", ab);
- if (err < 0) {
- dev_err(ab->dev, "request_irq failed for Vbus rise irq\n");
- goto fail2;
- }
-
- ab->irq_num_vbus_fall = platform_get_irq_byname(pdev, "VBUS_DET_F");
- if (ab->irq_num_vbus_fall < 0) {
+ irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
+ if (irq < 0) {
+ err = irq;
dev_err(&pdev->dev, "VBUS fall irq not found\n");
- return ab->irq_num_vbus_fall;
+ goto irq_fail;
}
- err = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
- ab8500_usb_v1x_vbus_fall_irq,
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED,
"usb-vbus-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
- goto fail3;
+ goto irq_fail;
}
+ ab->irq_num_vbus_fall = irq;
return 0;
-fail3:
- free_irq(ab->irq_num_vbus_rise, ab);
-fail2:
- free_irq(ab->irq_num_id_fall, ab);
-fail1:
- free_irq(ab->irq_num_id_rise, ab);
-fail0:
+
+irq_fail:
+ ab8500_usb_irq_free(ab);
return err;
}
-static int ab8500_usb_v2_res_setup(struct platform_device *pdev,
- struct ab8500_usb *ab)
+/* Sysfs interfaces */
+static ssize_t usb_serial_number(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
- int err;
+ u32 buffer[5];
+ void __iomem *backup_ram = NULL;
- ab->irq_num_link_status = platform_get_irq_byname(pdev,
- "USB_LINK_STATUS");
- if (ab->irq_num_link_status < 0) {
- dev_err(&pdev->dev, "Link status irq not found\n");
- return ab->irq_num_link_status;
- }
+ backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14);
- err = request_threaded_irq(ab->irq_num_link_status, NULL,
- ab8500_usb_v20_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-link-status", ab);
- if (err < 0) {
- dev_err(ab->dev,
- "request_irq failed for link status irq\n");
- return err;
- }
+ if (backup_ram) {
+ buffer[0] = readl(backup_ram);
+ buffer[1] = readl(backup_ram + 4);
+ buffer[2] = readl(backup_ram + 8);
+ buffer[3] = readl(backup_ram + 0x0c);
+ buffer[4] = readl(backup_ram + 0x10);
- return 0;
+ snprintf(buf, MAX_USB_SERIAL_NUMBER_LEN+1,
+ "%.8X%.8X%.8X%.8X%.8X",
+ buffer[0], buffer[1], buffer[2], buffer[3], buffer[4]);
+
+ iounmap(backup_ram);
+ } else {
+ buf[0] = '\0';
+ printk(KERN_ERR "ioremap of backup ram failed\n");
+ }
+
+ return strlen(buf);
}
+static struct attribute usb_serial_number_attribute =
+ {.name = "serial_number", .mode = S_IRUGO};
+
+static struct attribute *serial_number[] = {
+ &usb_serial_number_attribute,
+ NULL
+};
+
+static const struct sysfs_ops usb_sysfs_ops = {
+ .show = usb_serial_number,
+};
+
+static struct kobj_type ktype_serial_number = {
+ .sysfs_ops = &usb_sysfs_ops,
+ .default_attrs = serial_number,
+};
+
static int __devinit ab8500_usb_probe(struct platform_device *pdev)
{
struct ab8500_usb *ab;
+ struct ab8500_platform_data *ab8500_pdata =
+ dev_get_platdata(pdev->dev.parent);
int err;
int rev;
+ int ret = -1;
rev = abx500_get_chip_id(&pdev->dev);
if (rev < 0) {
dev_err(&pdev->dev, "Chip id read failed\n");
return rev;
- } else if (rev < 0x10) {
- dev_err(&pdev->dev, "Unsupported AB8500 chip\n");
+ } else if (rev < 0x20) {
+ dev_err(&pdev->dev, "Unsupported AB8500 chip rev=%d\n", rev);
return -ENODEV;
}
@@ -492,11 +757,12 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
ab->rev = rev;
ab->otg.dev = ab->dev;
ab->otg.label = "ab8500";
- ab->otg.state = OTG_STATE_UNDEFINED;
+ ab->otg.state = OTG_STATE_B_IDLE;
ab->otg.set_host = ab8500_usb_set_host;
ab->otg.set_peripheral = ab8500_usb_set_peripheral;
ab->otg.set_suspend = ab8500_usb_set_suspend;
ab->otg.set_power = ab8500_usb_set_power;
+ ab->usb_gpio = ab8500_pdata->usb;
platform_set_drvdata(pdev, ab);
@@ -510,27 +776,114 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
/* all: Disable phy when called from set_host and set_peripheral */
INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
- if (ab->rev < 0x20) {
- err = ab8500_usb_v1x_res_setup(pdev, ab);
- ab->link_status_wait = AB8500_V1x_LINK_STAT_WAIT;
- } else {
- err = ab8500_usb_v2_res_setup(pdev, ab);
+ INIT_DELAYED_WORK_DEFERRABLE(&ab->work_usb_workaround,
+ ab8500_usb_load);
+ err = ab8500_usb_regulator_get(ab);
+ if (err)
+ goto fail0;
+
+ ab->sysclk = clk_get(ab->dev, "sysclk");
+ if (IS_ERR(ab->sysclk)) {
+ err = PTR_ERR(ab->sysclk);
+ goto fail1;
}
+ err = ab8500_usb_irq_setup(pdev, ab);
if (err < 0)
- goto fail0;
+ goto fail2;
err = otg_set_transceiver(&ab->otg);
if (err) {
dev_err(&pdev->dev, "Can't register transceiver\n");
- goto fail1;
+ goto fail3;
}
- dev_info(&pdev->dev, "AB8500 usb driver initialized\n");
+ /* Write Phy tuning values */
+ if (ab->rev >= 0x30) {
+ /* Enable the PBT/Bank 0x12 access */
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEVELOPMENT,
+ AB8500_BANK12_ACCESS,
+ 0x01);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to enable bank12"
+ " access ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE1,
+ 0xC8);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE1"
+ " register ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE2,
+ 0x00);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE2"
+ " register ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE3,
+ 0x78);
+
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE3"
+ " regester ret=%d\n", ret);
+
+ /* Switch to normal mode/disable Bank 0x12 access */
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEVELOPMENT,
+ AB8500_BANK12_ACCESS,
+ 0x00);
+
+ if (ret < 0)
+ printk(KERN_ERR "Failed to switch bank12"
+ " access ret=%d\n", ret);
+ }
+ /* Needed to enable ID detection. */
+ ab8500_usb_wd_workaround(ab);
+
+ ab->serial_number_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+
+ if (ab->serial_number_kobj == NULL) {
+ err = -ENOMEM;
+ goto fail3;
+ }
+ ab->serial_number_kobj->ktype = &ktype_serial_number;
+ kobject_init(ab->serial_number_kobj, ab->serial_number_kobj->ktype);
+
+ ret = kobject_set_name(ab->serial_number_kobj, "usb_serial_number");
+ if (ret)
+ kfree(ab->serial_number_kobj);
+
+ ret = kobject_add(ab->serial_number_kobj, NULL, "usb_serial_number");
+ if (ret)
+ kfree(ab->serial_number_kobj);
+
+
+ err = ab->usb_gpio->get(ab->dev);
+ if (err < 0)
+ goto fail3;
+
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 50);
+ dev_info(&pdev->dev, "revision 0x%2x driver initialized\n", ab->rev);
+
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "usb", 25);
+
+ err = ab8500_usb_boot_detect(ab);
+ if (err < 0)
+ goto fail3;
return 0;
-fail1:
+fail3:
ab8500_usb_irq_free(ab);
+fail2:
+ clk_put(ab->sysclk);
+fail1:
+ ab8500_usb_regulator_put(ab);
fail0:
kfree(ab);
return err;
@@ -548,8 +901,16 @@ static int __devexit ab8500_usb_remove(struct platform_device *pdev)
otg_set_transceiver(NULL);
- ab8500_usb_host_phy_dis(ab);
- ab8500_usb_peri_phy_dis(ab);
+ if (ab->mode == USB_HOST)
+ ab8500_usb_host_phy_dis(ab);
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+
+ clk_put(ab->sysclk);
+
+ ab8500_usb_regulator_put(ab);
+
+ ab->usb_gpio->put();
platform_set_drvdata(pdev, NULL);
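
The serial-number kobject added above is registered with a NULL parent, so the attribute is expected to appear directly under sysfs as /sys/usb_serial_number/serial_number (the path is inferred from the kobject_add() call and should be treated as an assumption). A minimal user-space reader, for illustration only:

	/* Illustration only: read the exported USB serial number. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char serial[64] = "";
		int fd = open("/sys/usb_serial_number/serial_number", O_RDONLY);

		if (fd < 0)
			return 1;
		read(fd, serial, sizeof(serial) - 1);	/* 40 hex characters expected */
		close(fd);
		printf("USB serial number: %s\n", serial);

		return 0;
	}
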
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d83e967e4e1..a0b88654c57 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/stub/Kconfig"
+source "drivers/gpu/mali/Kconfig"
+
config VGASTATE
tristate
default n
@@ -286,6 +288,8 @@ config FB_CIRRUS
Say N unless you have such a graphics board or plan to get one
before you next recompile the kernel.
+source "drivers/video/mcde/Kconfig"
+
config FB_PM2
tristate "Permedia2 support"
depends on FB && ((AMIGA && BROKEN) || PCI)
@@ -2414,6 +2418,8 @@ source "drivers/video/omap2/Kconfig"
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
+source "drivers/video/av8100/Kconfig"
+source "drivers/video/b2r2/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9b9d8fff773..221b4379118 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -136,6 +136,9 @@ obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
obj-y += omap2/
+obj-$(CONFIG_FB_MCDE) += mcde/
+obj-$(CONFIG_AV8100) += av8100/
+obj-y += b2r2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
diff --git a/drivers/video/av8100/Kconfig b/drivers/video/av8100/Kconfig
new file mode 100644
index 00000000000..40b9943aaa9
--- /dev/null
+++ b/drivers/video/av8100/Kconfig
@@ -0,0 +1,48 @@
+config AV8100
+ tristate "AV8100 driver support (HDMI/CVBS)"
+ default n
+ help
+ Enable this option if HDMI/TV-out (CVBS) driver support is required.
+
+config HDMI_AV8100_DEBUG
+ bool "HDMI and AV8100 debug messages"
+ default n
+ depends on AV8100
+ ---help---
+ Say Y here if you want the HDMI and AV8100 driver to
+ output debug messages.
+
+choice
+ prompt "AV8100 HW trig method"
+ default AV8100_HWTRIG_DSI_TE
+
+config AV8100_HWTRIG_INT
+ bool "AV8100 HW trig on INT"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ from AV8100 INT to MCDE sync0.
+
+config AV8100_HWTRIG_I2SDAT3
+ bool "AV8100 HW trig on I2SDAT3"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ from AV8100 I2SDAT3 to MCDE sync1.
+
+config AV8100_HWTRIG_DSI_TE
+ bool "AV8100 HW trig on DSI"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ using DSI TE polling between AV8100 and MCDE.
+
+config AV8100_HWTRIG_NONE
+ bool "AV8100 SW trig"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use SW triggering
+ between AV8100 and MCDE.
+
+endchoice
+
diff --git a/drivers/video/av8100/Makefile b/drivers/video/av8100/Makefile
new file mode 100644
index 00000000000..2d3028b18ca
--- /dev/null
+++ b/drivers/video/av8100/Makefile
@@ -0,0 +1,10 @@
+# Makefile for the AV8100 HDMI driver (built-in or as a loadable module)
+
+obj-$(CONFIG_AV8100) += av8100.o hdmi.o
+
+ifdef CONFIG_HDMI_AV8100_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+clean-files := av8100.o hdmi.o built-in.o modules.order
+
diff --git a/drivers/video/av8100/av8100.c b/drivers/video/av8100/av8100.c
new file mode 100644
index 00000000000..36f86512a38
--- /dev/null
+++ b/drivers/video/av8100/av8100.c
@@ -0,0 +1,4162 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * AV8100 driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "av8100_regs.h"
+#include <video/av8100.h>
+#include <video/hdmi.h>
+#include <linux/firmware.h>
+
+#define AV8100_FW_FILENAME "av8100.fw"
+#define CUT_STR_0 "2.1"
+#define CUT_STR_1 "2.2"
+#define CUT_STR_3 "2.3"
+#define CUT_STR_30 "3.0"
+#define CUT_STR_UNKNOWN ""
+#define AV8100_DEVNR_DEFAULT 0
+
+/* Interrupts */
+#define AV8100_INT_EVENT 0x1
+#define AV8100_PLUGSTARTUP_EVENT 0x4
+
+#define AV8100_PLUGSTARTUP_TIME 100
+
+/* Standby search time */
+#define AV8100_ON_TIME 1 /* 9 ms step */
+#define AV8100_DENC_OFF_TIME 3 /* 275 ms step if > V1. Not used if V1 */
+#define AV8100_HDMI_OFF_TIME 2 /* 140 ms step if V2. 80 ms step if V1 */
+
+/* Command offsets */
+#define AV8100_COMMAND_OFFSET 0x10
+#define AV8100_CUTVER_OFFSET 0x11
+#define AV8100_COMMAND_MAX_LENGTH 0x81
+#define AV8100_CMD_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_2ND_RET_BYTE_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_CEC_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 4)
+#define AV8100_HDCP_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_EDID_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_FUSE_CRC_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_FUSE_PRGD_OFFSET (AV8100_COMMAND_OFFSET + 3)
+#define AV8100_CRC32_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_CEC_ADDR_OFFSET (AV8100_COMMAND_OFFSET + 3)
+
+/* Tearing effect line numbers */
+#define AV8100_TE_LINE_NB_14 14
+#define AV8100_TE_LINE_NB_17 17
+#define AV8100_TE_LINE_NB_18 18
+#define AV8100_TE_LINE_NB_21 21
+#define AV8100_TE_LINE_NB_22 22
+#define AV8100_TE_LINE_NB_24 24
+#define AV8100_TE_LINE_NB_25 25
+#define AV8100_TE_LINE_NB_26 26
+#define AV8100_TE_LINE_NB_29 29
+#define AV8100_TE_LINE_NB_30 30
+#define AV8100_TE_LINE_NB_32 32
+#define AV8100_TE_LINE_NB_38 38
+#define AV8100_TE_LINE_NB_40 40
+#define AV8100_UI_X4_DEFAULT 6
+
+#define HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT 2
+#define HDMI_CEC_MESSAGE_WRITE_BUFFER_SIZE 16
+#define HDMI_HDCP_SEND_KEY_SIZE 7
+#define HDMI_INFOFRAME_DATA_SIZE 28
+#define HDMI_FUSE_AES_KEY_SIZE 16
+#define HDMI_FUSE_AES_KEY_RET_SIZE 2
+#define HDMI_LOADAES_END_BLK_NR 145
+#define HDMI_CRC32_SIZE 4
+#define HDMI_HDCP_MGMT_BKSV_SIZE 5
+#define HDMI_HDCP_MGMT_SHA_SIZE 20
+#define HDMI_HDCP_MGMT_MAX_DEVICES_SIZE 20
+#define HDMI_HDCP_MGMT_DEVICE_MASK 0x7F
+#define HDMI_EDIDREAD_SIZE 0x7F
+
+#define HPDS_INVALID 0xF
+#define CPDS_INVALID 0xF
+#define CECRX_INVALID 0xF
+
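+/*
+ * Helpers used to split 16/32-bit configuration values into the single
+ * bytes expected in the AV8100 I2C command buffer, most significant byte
+ * first (e.g. REG_16_8_MSB(0x1234) is 0x12 and REG_16_8_LSB(0x1234) is 0x34).
+ */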
+#define REG_16_8_LSB(p) ((u8)(p & 0xFF))
+#define REG_16_8_MSB(p) ((u8)((p & 0xFF00)>>8))
+#define REG_32_8_MSB(p) ((u8)((p & 0xFF000000)>>24))
+#define REG_32_8_MMSB(p) ((u8)((p & 0x00FF0000)>>16))
+#define REG_32_8_MLSB(p) ((u8)((p & 0x0000FF00)>>8))
+#define REG_32_8_LSB(p) ((u8)(p & 0x000000FF))
+#define REG_10_8_MSB(p) ((u8)((p & 0x300)>>8))
+#define REG_12_8_MSB(p) ((u8)((p & 0xf00)>>8))
+
+#define AV8100_WAITTIME_1MS 1
+#define AV8100_WAITTIME_5MS 5
+#define AV8100_WAITTIME_10MS 10
+#define AV8100_WAITTIME_50MS 50
+#define AV8100_WATTIME_100US 100
+
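+/*
+ * av8100_hw_mutex serialises hardware (register/command) access;
+ * av8100_fwdl_mutex serialises firmware download.
+ */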
+static DEFINE_MUTEX(av8100_hw_mutex);
+#define LOCK_AV8100_HW mutex_lock(&av8100_hw_mutex)
+#define UNLOCK_AV8100_HW mutex_unlock(&av8100_hw_mutex)
+static DEFINE_MUTEX(av8100_fwdl_mutex);
+#define LOCK_AV8100_FWDL mutex_lock(&av8100_fwdl_mutex)
+#define UNLOCK_AV8100_FWDL mutex_unlock(&av8100_fwdl_mutex)
+
+struct color_conversion_cmd {
+ unsigned short c0;
+ unsigned short c1;
+ unsigned short c2;
+ unsigned short c3;
+ unsigned short c4;
+ unsigned short c5;
+ unsigned short c6;
+ unsigned short c7;
+ unsigned short c8;
+ unsigned short aoffset;
+ unsigned short boffset;
+ unsigned short coffset;
+ unsigned char lmax;
+ unsigned char lmin;
+ unsigned char cmax;
+ unsigned char cmin;
+};
+
+struct av8100_config {
+ struct i2c_client *client;
+ struct i2c_device_id *id;
+ struct av8100_video_input_format_cmd hdmi_video_input_cmd;
+ struct av8100_audio_input_format_cmd hdmi_audio_input_cmd;
+ struct av8100_video_output_format_cmd hdmi_video_output_cmd;
+ struct av8100_video_scaling_format_cmd hdmi_video_scaling_cmd;
+ enum av8100_color_transform color_transform;
+ struct av8100_cec_message_write_format_cmd
+ hdmi_cec_message_write_cmd;
+ struct av8100_cec_message_read_back_format_cmd
+ hdmi_cec_message_read_back_cmd;
+ struct av8100_denc_format_cmd hdmi_denc_cmd;
+ struct av8100_hdmi_cmd hdmi_cmd;
+ struct av8100_hdcp_send_key_format_cmd hdmi_hdcp_send_key_cmd;
+ struct av8100_hdcp_management_format_cmd
+ hdmi_hdcp_management_format_cmd;
+ struct av8100_infoframes_format_cmd hdmi_infoframes_cmd;
+ struct av8100_edid_section_readback_format_cmd
+ hdmi_edid_section_readback_cmd;
+ struct av8100_pattern_generator_format_cmd hdmi_pattern_generator_cmd;
+ struct av8100_fuse_aes_key_format_cmd hdmi_fuse_aes_key_cmd;
+};
+
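+/*
+ * HDMI plug detection state. PLUGGED_STARTUP is an intermediate state
+ * entered on plug-in; a timer later re-reads the plug status and settles
+ * the state to PLUGGED or UNPLUGGED.
+ */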
+enum av8100_plug_state {
+ AV8100_UNPLUGGED,
+ AV8100_PLUGGED_STARTUP,
+ AV8100_PLUGGED
+};
+
+struct av8100_params {
+ int denc_off_time;/* 5 volt time */
+ int hdmi_off_time;/* 5 volt time */
+ int on_time;/* 5 volt time */
+ u8 hpdm;/*stby_int_mask*/
+ u8 cpdm;/*stby_int_mask*/
+ u8 cecm;/*gen_int_mask*/
+ u8 hdcpm;/*gen_int_mask*/
+ u8 uovbm;/*gen_int_mask*/
+ void (*hdmi_ev_cb)(enum av8100_hdmi_event);
+ enum av8100_plug_state plug_state;
+ struct clk *inputclk;
+ bool inputclk_requested;
+ bool opp_requested;
+ struct regulator *regulator_pwr;
+ bool regulator_requested;
+ bool pre_suspend_power;
+ bool ints_enabled;
+};
+
+/**
+ * struct av8100_cea - CEA (Consumer Electronics Association) timing data
+ * @cea_id: descriptive name of the CEA/VESA mode
+ * @cea_nb: CEA/VESA mode number
+ * @vtotale: total number of vertical lines
+ **/
+
+struct av8100_cea {
+ char cea_id[40];
+ int cea_nb;
+ int vtotale;
+ int vactive;
+ int vsbp;
+ int vslen;
+ int vsfp;
+ char vpol[5];
+ int htotale;
+ int hactive;
+ int hbp;
+ int hslen;
+ int hfp;
+ int frequence;
+ char hpol[5];
+ int reg_line_duration;
+ int blkoel_duration;
+ int uix4;
+ int pll_mult;
+ int pll_div;
+};
+
+enum av8100_command_size {
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE = 0x17,
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE = 0x8,
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE = 0x1E,
+ AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE = 0x11,
+ AV8100_COMMAND_COLORSPACECONVERSION_SIZE = 0x1D,
+ AV8100_COMMAND_CEC_MESSAGE_WRITE_SIZE = 0x12,
+ AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE = 0x1,
+ AV8100_COMMAND_DENC_SIZE = 0x6,
+ AV8100_COMMAND_HDMI_SIZE = 0x4,
+ AV8100_COMMAND_HDCP_SENDKEY_SIZE = 0xA,
+ AV8100_COMMAND_HDCP_MANAGEMENT_SIZE = 0x3,
+ AV8100_COMMAND_INFOFRAMES_SIZE = 0x21,
+ AV8100_COMMAND_EDID_SECTION_READBACK_SIZE = 0x3,
+ AV8100_COMMAND_PATTERNGENERATOR_SIZE = 0x4,
+ AV8100_COMMAND_FUSE_AES_KEY_SIZE = 0x12,
+ AV8100_COMMAND_FUSE_AES_CHK_SIZE = 0x2,
+};
+
+struct av8100_device {
+ struct list_head list;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct av8100_config config;
+ struct av8100_status status;
+ struct timer_list timer;
+ wait_queue_head_t event;
+ int flag;
+ struct av8100_params params;
+ u8 chip_version;
+};
+
+static const unsigned int waittime_retry[10] = {
+ 1, 2, 4, 6, 8, 10, 10, 10, 10, 10};
+
+static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on);
+static void clr_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status);
+static void set_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status);
+static void cec_rx(struct av8100_device *adev);
+static void cec_tx(struct av8100_device *adev);
+static void cec_txerr(struct av8100_device *adev);
+static void hdcp_changed(struct av8100_device *adev);
+static const struct color_conversion_cmd *get_color_transform_cmd(
+ struct av8100_device *adev,
+ enum av8100_color_transform transform);
+static int av8100_open(struct inode *inode, struct file *filp);
+static int av8100_release(struct inode *inode, struct file *filp);
+static long av8100_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int __devinit av8100_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id);
+static int __devexit av8100_remove(struct i2c_client *i2c_client);
+
+static const struct file_operations av8100_fops = {
+ .owner = THIS_MODULE,
+ .open = av8100_open,
+ .release = av8100_release,
+ .unlocked_ioctl = av8100_ioctl
+};
+
+/* List of devices */
+static LIST_HEAD(av8100_device_list);
+
+static const struct av8100_cea av8100_all_cea[29] = {
+/* cea id
+ * cea_nr vtot vact vsbpp vslen
+ * vsfp vpol htot hact hbp hslen hfp freq
+ * hpol rld bd uix4 pm pd */
+{ "0 CUSTOM ",
+ 0, 0, 0, 0, 0,
+ 0, "-", 800, 640, 16, 96, 10, 25200000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "1 CEA 1 VESA 4 640x480p @ 60 Hz ",
+ 1, 525, 480, 33, 2,
+ 10, "-", 800, 640, 49, 290, 146, 25200000,
+ "-", 2438, 1270, 6, 32, 1},/*RGB888*/
+{ "2 CEA 2 - 3 720x480p @ 60 Hz 4:3 ",
+ 2, 525, 480, 30, 6,
+ 9, "-", 858, 720, 34, 130, 128, 27027000,
+ "-", 1828, 0x3C0, 8, 24, 1},/*RGB565*/
+{ "3 CEA 4 1280x720p @ 60 Hz ",
+ 4, 750, 720, 20, 5,
+ 5, "+", 1650, 1280, 114, 39, 228, 74250000,
+ "+", 1706, 164, 6, 32, 1},/*RGB565*/
+{ "4 CEA 5 1920x1080i @ 60 Hz ",
+ 5, 1125, 540, 20, 5,
+ 0, "+", 2200, 1920, 88, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "5 CEA 6-7 480i (NTSC) ",
+ 6, 525, 240, 44, 5,
+ 0, "-", 858, 720, 12, 64, 10, 13513513,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "6 CEA 14-15 480p @ 60 Hz ",
+ 14, 525, 480, 44, 5,
+ 0, "-", 858, 720, 12, 64, 10, 27027000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "7 CEA 16 1920x1080p @ 60 Hz ",
+ 16, 1125, 1080, 36, 5,
+ 0, "+", 1980, 1280, 440, 40, 10, 133650000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "8 CEA 17-18 720x576p @ 50 Hz ",
+ 17, 625, 576, 44, 5,
+ 0, "-", 864, 720, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "9 CEA 19 1280x720p @ 50 Hz ",
+ 19, 750, 720, 25, 5,
+ 0, "+", 1980, 1280, 440, 40, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "10 CEA 20 1920 x 1080i @ 50 Hz ",
+ 20, 1125, 540, 20, 5,
+ 0, "+", 2640, 1920, 528, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "11 CEA 21-22 576i (PAL) ",
+ 21, 625, 288, 44, 5,
+ 0, "-", 1728, 1440, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "12 CEA 29/30 576p ",
+ 29, 625, 576, 44, 5,
+ 0, "-", 864, 720, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "13 CEA 31 1080p 50Hz ",
+ 31, 1125, 1080, 44, 5,
+ 0, "-", 2640, 1920, 12, 64, 10, 148500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "14 CEA 32 1920x1080p @ 24 Hz ",
+ 32, 1125, 1080, 36, 5,
+ 4, "+", 2750, 1920, 660, 44, 153, 74250000,
+ "+", 2844, 0x530, 6, 32, 1},/*RGB565*/
+{ "15 CEA 33 1920x1080p @ 25 Hz ",
+ 33, 1125, 1080, 36, 5,
+ 4, "+", 2640, 1920, 528, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "16 CEA 34 1920x1080p @ 30Hz ",
+ 34, 1125, 1080, 36, 5,
+ 4, "+", 2200, 1920, 91, 44, 153, 74250000,
+ "+", 2275, 0xAB, 6, 32, 1},/*RGB565*/
+{ "17 CEA 60 1280x720p @ 24 Hz ",
+ 60, 750, 720, 20, 5,
+ 5, "+", 3300, 1280, 284, 50, 2276, 59400000,
+ "+", 4266, 0xAD0, 5, 32, 1},/*RGB565*/
+{ "18 CEA 61 1280x720p @ 25 Hz ",
+ 61, 750, 720, 20, 5,
+ 5, "+", 3960, 1280, 228, 39, 2503, 74250000,
+ "+", 4096, 0x500, 5, 32, 1},/*RGB565*/
+{ "19 CEA 62 1280x720p @ 30 Hz ",
+ 62, 750, 720, 20, 5,
+ 5, "+", 3300, 1280, 228, 39, 1820, 74250000,
+ "+", 3413, 0x770, 5, 32, 1},/*RGB565*/
+{ "20 VESA 9 800x600 @ 60 Hz ",
+ 109, 628, 600, 28, 4,
+ 0, "+", 1056, 800, 40, 128, 10, 40000000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "21 VESA 14 848x480 @ 60 Hz ",
+ 114, 517, 480, 20, 5,
+ 0, "+", 1088, 848, 24, 80, 10, 33750000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "22 VESA 16 1024x768 @ 60 Hz ",
+ 116, 806, 768, 38, 6,
+ 0, "-", 1344, 1024, 24, 135, 10, 65000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "23 VESA 22 1280x768 @ 60 Hz ",
+ 122, 790, 768, 34, 4,
+ 0, "+", 1440, 1280, 48, 160, 10, 68250000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "24 VESA 23 1280x768 @ 60 Hz ",
+ 123, 798, 768, 30, 7,
+ 0, "+", 1664, 1280, 64, 128, 10, 79500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "25 VESA 27 1280x800 @ 60 Hz ",
+ 127, 823, 800, 23, 6,
+ 0, "+", 1440, 1280, 48, 32, 10, 71000000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "26 VESA 28 1280x800 @ 60 Hz ",
+ 128, 831, 800, 31, 6,
+ 0, "+", 1680, 1280, 72, 128, 10, 83500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "27 VESA 39 1360x768 @ 60 Hz ",
+ 139, 795, 768, 22, 5,
+ 0, "-", 1792, 1360, 48, 32, 10, 85500000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "28 VESA 81 1366x768 @ 60 Hz ",
+ 181, 798, 768, 30, 5,
+ 0, "+", 1792, 1366, 72, 136, 10, 85750000,
+ "-", 0, 0, 0, 0, 0} /*Settings to be defined*/
+};
+
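+/*
+ * Predefined colour conversion settings for the COLORSPACECONVERSION
+ * command: c0..c8 form a 3x3 coefficient matrix, aoffset/boffset/coffset
+ * are per-channel offsets, and lmax/lmin/cmax/cmin are the luma/chroma
+ * clamp limits.
+ */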
+const struct color_conversion_cmd col_trans_identity = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_identity_clamp_yuv = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xeb,
+ .lmin = 0x10,
+ .cmax = 0xf0,
+ .cmin = 0x10,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_rgb_v1 = {
+ .c0 = 0x0087, .c1 = 0x0000, .c2 = 0x00ba,
+ .c3 = 0x0087, .c4 = 0xffd3, .c5 = 0xffa1,
+ .c6 = 0x0087, .c7 = 0x00eb, .c8 = 0x0000,
+ .aoffset = 0xffab, .boffset = 0x004e, .coffset = 0xff92,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_rgb_v2 = {
+ .c0 = 0x0198, .c1 = 0x012a, .c2 = 0x0000,
+ .c3 = 0xff30, .c4 = 0x012a, .c5 = 0xff9c,
+ .c6 = 0x0000, .c7 = 0x012a, .c8 = 0x0204,
+ .aoffset = 0xff21, .boffset = 0x0088, .coffset = 0xfeeb,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_denc = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xeb,
+ .lmin = 0x10,
+ .cmax = 0xf0,
+ .cmin = 0x10,
+};
+
+const struct color_conversion_cmd col_trans_rgb_to_denc = {
+ .c0 = 0x0070, .c1 = 0xffb6, .c2 = 0xffda,
+ .c3 = 0x0042, .c4 = 0x0081, .c5 = 0x0019,
+ .c6 = 0xffee, .c7 = 0xffa2, .c8 = 0x0070,
+ .aoffset = 0x007f, .boffset = 0x0010, .coffset = 0x007f,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+static const struct i2c_device_id av8100_id[] = {
+ { "av8100", 0 },
+ { }
+};
+
+static struct av8100_device *devnr_to_adev(int devnr)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (cnt == devnr)
+ return av8100_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
+static struct av8100_device *dev_to_adev(struct device *dev)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (av8100_dev->dev == dev)
+ return av8100_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
+static int adev_to_devnr(struct av8100_device *adev)
+{
+ /* Get devnr from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (av8100_dev == adev)
+ return cnt;
+ cnt++;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_PM
+static int av8100_suspend(struct i2c_client *i2c_client, pm_message_t state)
+{
+ int ret = 0;
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(&i2c_client->dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ adev->params.pre_suspend_power =
+ (av8100_status_get().av8100_state > AV8100_OPMODE_SHUTDOWN);
+
+ if (adev->params.pre_suspend_power) {
+ ret = av8100_powerdown();
+ if (ret)
+ dev_err(adev->dev, "av8100_powerdown failed\n");
+ }
+
+ return ret;
+}
+
+static int av8100_resume(struct i2c_client *i2c_client)
+{
+ int ret;
+ u8 hpds = 0;
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(&i2c_client->dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (adev->params.pre_suspend_power) {
+ ret = av8100_powerup();
+ if (ret) {
+ dev_err(adev->dev, "av8100_powerup failed\n");
+ return ret;
+ }
+
+ /* Check HDMI plug status */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, NULL, NULL)) {
+ dev_warn(adev->dev, "av8100_reg_stby_r failed\n");
+ goto av8100_resume_end;
+ }
+
+ if (hpds)
+ set_plug_status(adev, AV8100_HDMI_PLUGIN); /* Plugged*/
+ else
+ clr_plug_status(adev,
+ AV8100_HDMI_PLUGIN); /* Unplugged*/
+
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ av8100_enable_interrupt();
+ }
+
+av8100_resume_end:
+ return 0;
+}
+#else
+#define av8100_suspend NULL
+#define av8100_resume NULL
+#endif
+
+static struct i2c_driver av8100_driver = {
+ .probe = av8100_probe,
+ .remove = av8100_remove,
+ .suspend = av8100_suspend,
+ .resume = av8100_resume,
+ .driver = {
+ .name = "av8100",
+ },
+ .id_table = av8100_id,
+};
+
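+/*
+ * Plug startup timer callback: flags a plug-startup event and wakes the
+ * worker thread, which then re-checks the plug status.
+ */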
+static void av8100_plugtimer_int(unsigned long value)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev((int)value);
+ adev->flag |= AV8100_PLUGSTARTUP_EVENT;
+ wake_up_interruptible(&adev->event);
+ del_timer(&adev->timer);
+}
+
+static int av8100_int_event_handle(struct av8100_device *adev)
+{
+ u8 hpdi = 0;
+ u8 cpdi = 0;
+ u8 uovbi = 0;
+ u8 hdcpi = 0;
+ u8 ceci = 0;
+ u8 hpds = 0;
+ u8 cpds = 0;
+ u8 hdcps = 0;
+ u8 onuvb = 0;
+ u8 cectxerr = 0;
+ u8 cecrx = 0;
+ u8 cectx = 0;
+
+ /* STANDBY_PENDING_INTERRUPT reg */
+ if (av8100_reg_stby_pend_int_r(&hpdi, &cpdi, NULL, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_stby_pend_int_r failed\n");
+ goto av8100_int_event_handle_1;
+ }
+
+ /* Plug event */
+ if (hpdi | cpdi) {
+ /* Clear pending interrupts */
+ (void)av8100_reg_stby_pend_int_w(1, 1, 1, 0);
+
+ /* STANDBY reg */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_stby_r failed\n");
+ goto av8100_int_event_handle_1;
+ }
+ }
+
+ if (cpdi & adev->params.cpdm) {
+ /* TVout plugin change */
+ if (cpds) {
+ dev_dbg(adev->dev, "cpds 1\n");
+ set_plug_status(adev, AV8100_CVBS_PLUGIN);
+ } else {
+ dev_dbg(adev->dev, "cpds 0\n");
+ clr_plug_status(adev, AV8100_CVBS_PLUGIN);
+ }
+ }
+
+ if (hpdi & adev->params.hpdm) {
+ /* HDMI plugin change */
+ if (hpds) {
+ /* Plugged */
+ /* Set 5V always on */
+ av8100_5V_w(adev->params.denc_off_time,
+ 0,
+ adev->params.on_time);
+ dev_dbg(adev->dev, "hpds 1\n");
+ set_plug_status(adev, AV8100_HDMI_PLUGIN);
+ } else {
+ /* Unplugged */
+ av8100_5V_w(adev->params.denc_off_time,
+ adev->params.hdmi_off_time,
+ adev->params.on_time);
+ dev_dbg(adev->dev, "hpds 0\n");
+ clr_plug_status(adev, AV8100_HDMI_PLUGIN);
+ }
+ }
+
+av8100_int_event_handle_1:
+ /* GENERAL_INTERRUPT reg */
+ if (av8100_reg_gen_int_r(NULL, NULL, NULL, &ceci,
+ &hdcpi, &uovbi, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_gen_int_r failed\n");
+ return -EINVAL;
+ }
+
+ /* CEC or HDCP event */
+ if (ceci | hdcpi | uovbi) {
+ /* Clear pending interrupts */
+ av8100_reg_gen_int_w(1, 1, 1, 1, 1, 1);
+
+ /* GENERAL_STATUS reg */
+ if (av8100_reg_gen_status_r(&cectxerr, &cecrx, &cectx, NULL,
+ &onuvb, &hdcps) != 0) {
+ dev_dbg(adev->dev, "av8100_reg_gen_status_r fail\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Underflow or overflow */
+ if (uovbi)
+ dev_dbg(adev->dev, "uovbi %d\n", onuvb);
+
+ /* CEC received */
+ if (ceci && cecrx) {
+ u8 val;
+
+ dev_dbg(adev->dev, "cecrx\n");
+
+ /* Clear cecrx in status reg*/
+ if (av8100_reg_r(AV8100_GENERAL_STATUS, &val) == 0) {
+ if (av8100_reg_w(AV8100_GENERAL_STATUS,
+ val & ~AV8100_GENERAL_STATUS_CECREC_MASK))
+ dev_info(adev->dev, "gen_stat write error\n");
+ } else {
+ dev_info(adev->dev, "gen_stat read error\n");
+ }
+
+ /* Report CEC event */
+ cec_rx(adev);
+ }
+
+ /* CEC tx error */
+ if (ceci && cectx && cectxerr) {
+ dev_dbg(adev->dev, "cectxerr\n");
+ /* Report CEC tx error event */
+ cec_txerr(adev);
+ } else if (ceci && cectx) {
+ dev_dbg(adev->dev, "cectx\n");
+ /* Report CEC tx event */
+ cec_tx(adev);
+ }
+
+ /* HDCP event */
+ if (hdcpi) {
+ dev_dbg(adev->dev, "hdcpch:%0x\n", hdcps);
+ /* Report HDCP status change event */
+ hdcp_changed(adev);
+ }
+
+ return 0;
+}
+
+static int av8100_plugstartup_event_handle(struct av8100_device *adev)
+{
+ u8 hpds = 0;
+ u8 cpds = 0;
+
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ case AV8100_PLUGGED:
+ default:
+ break;
+
+ case AV8100_PLUGGED_STARTUP:
+ /* Unmask interrupt */
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ if (av8100_reg_stby_int_mask_w(adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) {
+ dev_dbg(adev->dev,
+ "av8100_reg_stby_int_mask_w fail\n");
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ /* Get actual plug status */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL))
+ dev_dbg(adev->dev, "av8100_reg_stby_r fail\n");
+
+ /* Set plugstate */
+ if (hpds) {
+ adev->params.plug_state = AV8100_PLUGGED;
+ dev_dbg(adev->dev, "plug_state:2\n");
+ } else {
+ adev->params.plug_state = AV8100_UNPLUGGED;
+ dev_dbg(adev->dev, "plug_state:0\n");
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT);
+ }
+ break;
+ }
+
+ return 0;
+}
+
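+/*
+ * Worker thread: waits for event flags set by the interrupt handler and
+ * the plug startup timer, and dispatches the corresponding handlers.
+ */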
+static int av8100_thread(void *p)
+{
+ u8 flags;
+ struct av8100_device *adev = p;
+
+ while (1) {
+ wait_event_interruptible(adev->event, (adev->flag != 0));
+ flags = adev->flag;
+ adev->flag = 0;
+
+ if (adev->status.av8100_state < AV8100_OPMODE_STANDBY)
+ continue;
+
+ if (flags & AV8100_INT_EVENT)
+ (void)av8100_int_event_handle(adev);
+
+ if (flags & AV8100_PLUGSTARTUP_EVENT)
+ (void)av8100_plugstartup_event_handle(adev);
+ }
+
+ return 0;
+}
+
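+/* Interrupt handler: flags an interrupt event and wakes the worker thread. */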
+static irqreturn_t av8100_intr_handler(int irq, void *p)
+{
+ struct av8100_device *adev;
+
+ adev = (struct av8100_device *) p;
+ adev->flag |= AV8100_INT_EVENT;
+ wake_up_interruptible(&adev->event);
+
+ return IRQ_HANDLED;
+}
+
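+/*
+ * Returns the DSI tearing-effect (TE) line number to use for the given
+ * output video format when configuring the video input format.
+ */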
+static u16 av8100_get_te_line_nb(
+ enum av8100_output_CEA_VESA output_video_format)
+{
+ u16 retval;
+
+ switch (output_video_format) {
+ case AV8100_CEA1_640X480P_59_94HZ:
+ case AV8100_CEA2_3_720X480P_59_94HZ:
+ case AV8100_VESA16_1024X768P_60HZ:
+ retval = AV8100_TE_LINE_NB_30;
+ break;
+
+ case AV8100_CEA4_1280X720P_60HZ:
+ case AV8100_CEA60_1280X720P_24HZ:
+ case AV8100_CEA61_1280X720P_25HZ:
+ case AV8100_CEA62_1280X720P_30HZ:
+ retval = AV8100_TE_LINE_NB_21;
+ break;
+
+ case AV8100_CEA5_1920X1080I_60HZ:
+ case AV8100_CEA6_7_NTSC_60HZ:
+ case AV8100_CEA20_1920X1080I_50HZ:
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ case AV8100_VESA27_1280X800P_59_91HZ:
+ retval = AV8100_TE_LINE_NB_18;
+ break;
+
+ case AV8100_CEA14_15_480p_60HZ:
+ retval = AV8100_TE_LINE_NB_32;
+ break;
+
+ case AV8100_CEA17_18_720X576P_50HZ:
+ case AV8100_CEA29_30_576P_50HZ:
+ retval = AV8100_TE_LINE_NB_40;
+ break;
+
+ case AV8100_CEA19_1280X720P_50HZ:
+ case AV8100_VESA39_1360X768P_60_02HZ:
+ retval = AV8100_TE_LINE_NB_22;
+ break;
+
+ case AV8100_CEA32_1920X1080P_24HZ:
+ case AV8100_CEA33_1920X1080P_25HZ:
+ case AV8100_CEA34_1920X1080P_30HZ:
+ retval = AV8100_TE_LINE_NB_38;
+ break;
+
+ case AV8100_VESA9_800X600P_60_32HZ:
+ retval = AV8100_TE_LINE_NB_24;
+ break;
+
+ case AV8100_VESA14_848X480P_60HZ:
+ retval = AV8100_TE_LINE_NB_29;
+ break;
+
+ case AV8100_VESA22_1280X768P_59_99HZ:
+ retval = AV8100_TE_LINE_NB_17;
+ break;
+
+ case AV8100_VESA23_1280X768P_59_87HZ:
+ case AV8100_VESA81_1366X768P_59_79HZ:
+ retval = AV8100_TE_LINE_NB_25;
+ break;
+
+ case AV8100_VESA28_1280X800P_59_81HZ:
+ retval = AV8100_TE_LINE_NB_26;
+ break;
+
+ case AV8100_CEA16_1920X1080P_60HZ:
+ case AV8100_CEA31_1920x1080P_50Hz:
+ default:
+ /* TODO */
+ retval = AV8100_TE_LINE_NB_38;
+ break;
+ }
+
+ return retval;
+}
+
+static u16 av8100_get_ui_x4(
+ enum av8100_output_CEA_VESA output_video_format)
+{
+ return AV8100_UI_X4_DEFAULT;
+}
+
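+/*
+ * Derive the video input and DENC configuration from the selected output
+ * video format.
+ */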
+static int av8100_config_video_output_dep(
+ enum av8100_output_CEA_VESA output_format)
+{
+ int retval;
+ union av8100_configuration config;
+
+ /* video input */
+ config.video_input_format.dsi_input_mode =
+ AV8100_HDMI_DSI_COMMAND_MODE;
+ config.video_input_format.input_pixel_format = AV8100_INPUT_PIX_RGB565;
+ config.video_input_format.total_horizontal_pixel =
+ av8100_all_cea[output_format].htotale;
+ config.video_input_format.total_horizontal_active_pixel =
+ av8100_all_cea[output_format].hactive;
+ config.video_input_format.total_vertical_lines =
+ av8100_all_cea[output_format].vtotale;
+ config.video_input_format.total_vertical_active_lines =
+ av8100_all_cea[output_format].vactive;
+
+ switch (output_format) {
+ case AV8100_CEA5_1920X1080I_60HZ:
+ case AV8100_CEA20_1920X1080I_50HZ:
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ case AV8100_CEA6_7_NTSC_60HZ:
+ config.video_input_format.video_mode =
+ AV8100_VIDEO_INTERLACE;
+ break;
+
+ default:
+ config.video_input_format.video_mode =
+ AV8100_VIDEO_PROGRESSIVE;
+ break;
+ }
+
+ config.video_input_format.nb_data_lane =
+ AV8100_DATA_LANES_USED_2;
+ config.video_input_format.nb_virtual_ch_command_mode = 0;
+ config.video_input_format.nb_virtual_ch_video_mode = 0;
+ config.video_input_format.ui_x4 = av8100_get_ui_x4(output_format);
+ config.video_input_format.TE_line_nb = av8100_get_te_line_nb(
+ output_format);
+ config.video_input_format.TE_config = AV8100_TE_OFF;
+ config.video_input_format.master_clock_freq = 0;
+
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* DENC */
+ switch (output_format) {
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ config.denc_format.cvbs_video_format = AV8100_CVBS_625;
+ config.denc_format.standard_selection = AV8100_PAL_BDGHI;
+ break;
+
+ case AV8100_CEA6_7_NTSC_60HZ:
+ config.denc_format.cvbs_video_format = AV8100_CVBS_525;
+ config.denc_format.standard_selection = AV8100_NTSC_M;
+ break;
+
+ default:
+ /* Not supported */
+ break;
+ }
+
+ return 0;
+}
+
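+/*
+ * Set up the default configuration: colour transform, DENC, video output,
+ * video input, pattern generator, audio input, HDMI mode and EDID readback.
+ */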
+static int av8100_config_init(struct av8100_device *adev)
+{
+ int retval;
+ union av8100_configuration config;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ memset(&config, 0, sizeof(union av8100_configuration));
+ memset(&adev->config, 0, sizeof(struct av8100_config));
+
+ /* Color conversion */
+ config.color_transform = AV8100_COLOR_TRANSFORM_INDENTITY;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_COLORSPACECONVERSION, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* DENC */
+ config.denc_format.cvbs_video_format = AV8100_CVBS_625;
+ config.denc_format.standard_selection = AV8100_PAL_BDGHI;
+ config.denc_format.enable = 0;
+ config.denc_format.macrovision_enable = 0;
+ config.denc_format.internal_generator = 0;
+ retval = av8100_conf_prep(AV8100_COMMAND_DENC, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Video output */
+ config.video_output_format.video_output_cea_vesa =
+ AV8100_CEA4_1280X720P_60HZ;
+
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Video input */
+ av8100_config_video_output_dep(
+ config.video_output_format.video_output_cea_vesa);
+
+ /* Pattern generator */
+ config.pattern_generator_format.pattern_audio_mode =
+ AV8100_PATTERN_AUDIO_OFF;
+ config.pattern_generator_format.pattern_type =
+ AV8100_PATTERN_GENERATOR;
+ config.pattern_generator_format.pattern_video_format =
+ AV8100_PATTERN_720P;
+ retval = av8100_conf_prep(AV8100_COMMAND_PATTERNGENERATOR,
+ &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Audio input */
+ config.audio_input_format.audio_input_if_format =
+ AV8100_AUDIO_I2SDELAYED_MODE;
+ config.audio_input_format.i2s_input_nb = 1;
+ config.audio_input_format.sample_audio_freq = AV8100_AUDIO_FREQ_48KHZ;
+ config.audio_input_format.audio_word_lg = AV8100_AUDIO_16BITS;
+ config.audio_input_format.audio_format = AV8100_AUDIO_LPCM_MODE;
+ config.audio_input_format.audio_if_mode = AV8100_AUDIO_MASTER;
+ config.audio_input_format.audio_mute = AV8100_AUDIO_MUTE_DISABLE;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* HDMI mode */
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ config.hdmi_format.hdmi_format = AV8100_HDMI;
+ config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ retval = av8100_conf_prep(AV8100_COMMAND_HDMI, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* EDID section readback */
+ config.edid_section_readback_format.address = 0xA0;
+ config.edid_section_readback_format.block_number = 0;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_EDID_SECTION_READBACK, &config);
+ if (retval)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int av8100_params_init(struct av8100_device *adev)
+{
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ adev->params.denc_off_time = AV8100_DENC_OFF_TIME;
+ adev->params.hdmi_off_time = AV8100_HDMI_OFF_TIME;
+ adev->params.on_time = AV8100_ON_TIME;
+
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ adev->params.cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH;
+ adev->params.hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH;
+ adev->params.cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH;
+ adev->params.uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH;
+
+ adev->params.plug_state = AV8100_UNPLUGGED;
+ adev->params.inputclk = NULL;
+ adev->params.inputclk_requested = false;
+ adev->params.opp_requested = false;
+ adev->params.regulator_requested = false;
+
+ return 0;
+}
+
+static void clr_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status)
+{
+ adev->status.av8100_plugin_status &= ~status;
+
+ switch (status) {
+ case AV8100_HDMI_PLUGIN:
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ case AV8100_PLUGGED_STARTUP:
+ default:
+ break;
+
+ case AV8100_PLUGGED:
+ adev->params.plug_state =
+ AV8100_UNPLUGGED;
+ dev_dbg(adev->dev, "plug_state:0\n");
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT);
+ break;
+ }
+ break;
+
+ case AV8100_CVBS_PLUGIN:
+ /* TODO */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void set_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status)
+{
+ adev->status.av8100_plugin_status |= status;
+
+ switch (status) {
+ case AV8100_HDMI_PLUGIN:
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ adev->params.plug_state =
+ AV8100_PLUGGED_STARTUP;
+
+ dev_dbg(adev->dev, "plug_state:1\n");
+
+ /*
+ * Mask interrupts to avoid plug detect during
+ * startup
+ */
+ adev->params.hpdm =
+ AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW;
+ if (av8100_reg_stby_int_mask_w(
+ adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) {
+ dev_dbg(adev->dev,
+ "av8100_reg_stby_int_mask_w fail\n");
+ }
+
+ /* Set plug startup timer */
+ init_timer(&adev->timer);
+ adev->timer.expires = jiffies +
+ AV8100_PLUGSTARTUP_TIME;
+ adev->timer.function =
+ av8100_plugtimer_int;
+ adev->timer.data = 0;
+ adev->timer.data = adev_to_devnr(adev);
+ mod_timer(&adev->timer, adev->timer.expires);
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGIN);
+ break;
+
+ case AV8100_PLUGGED_STARTUP:
+ case AV8100_PLUGGED:
+ default:
+ break;
+ }
+ break;
+
+ case AV8100_CVBS_PLUGIN:
+ /* TODO */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void cec_rx(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CEC);
+}
+
+static void cec_tx(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTX);
+}
+
+static void cec_txerr(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTXERR);
+}
+
+static void hdcp_changed(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_HDCP);
+}
+
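+/*
+ * Update the driver operating state; dropping to standby or below clears
+ * the plug status and the HDMI-on flag.
+ */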
+static void av8100_set_state(struct av8100_device *adev,
+ enum av8100_operating_mode state)
+{
+ adev->status.av8100_state = state;
+
+ if (state <= AV8100_OPMODE_STANDBY) {
+ clr_plug_status(adev, AV8100_HDMI_PLUGIN);
+ clr_plug_status(adev, AV8100_CVBS_PLUGIN);
+ adev->status.hdmi_on = false;
+ }
+}
+
+/**
+ * write_single_byte() - Write a single byte to av8100
+ * through i2c interface.
+ * @client: i2c client structure
+ * @reg: register offset
+ * @data: data byte to be written
+ *
+ * This function uses the SMBus byte write API to write a single byte to av8100
+ **/
+static int write_single_byte(struct i2c_client *client, u8 reg,
+ u8 data)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
+ dev_dbg(dev, "i2c smbus write byte failed\n");
+
+ return ret;
+}
+
+/**
+ * read_single_byte() - read single byte from av8100
+ * through i2c interface
+ * @client: i2c client structure
+ * @reg: register offset
+ * @val: register value
+ *
+ * This function uses the SMBus byte read API to read a single byte from the
+ * reg offset.
+ **/
+static int read_single_byte(struct i2c_client *client, u8 reg, u8 *val)
+{
+ int value;
+ struct device *dev = &client->dev;
+
+ value = i2c_smbus_read_byte_data(client, reg);
+ if (value < 0) {
+ dev_dbg(dev, "i2c smbus read byte failed, read data = %x "
+ "from offset:%x\n", value, reg);
+ return -EFAULT;
+ }
+
+ *val = (u8) value;
+ return 0;
+}
+
+/**
+ * write_multi_byte() - Write a multiple bytes to av8100 through
+ * i2c interface.
+ * @client: i2c client structure
+ * @reg: register offset
+ * @buf: buffer to be written
+ * @nbytes: number of bytes to be written
+ *
+ * This function uses the SMBus I2C block write API to write nbytes bytes to
+ * the av8100.
+ **/
+static int write_multi_byte(struct i2c_client *client, u8 reg,
+ u8 *buf, u8 nbytes)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, nbytes, buf);
+ if (ret < 0)
+ dev_dbg(dev, "i2c smbus write multi byte error\n");
+
+ return ret;
+}
+
+static int configuration_video_input_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_video_input_cmd.dsi_input_mode;
+ buffer[1] = adev->config.hdmi_video_input_cmd.input_pixel_format;
+ buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_pixel);
+ buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_pixel);
+ buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_active_pixel);
+ buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_active_pixel);
+ buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_lines);
+ buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_lines);
+ buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_active_lines);
+ buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_active_lines);
+ buffer[10] = adev->config.hdmi_video_input_cmd.video_mode;
+ buffer[11] = adev->config.hdmi_video_input_cmd.nb_data_lane;
+ buffer[12] = adev->config.hdmi_video_input_cmd.
+ nb_virtual_ch_command_mode;
+ buffer[13] = adev->config.hdmi_video_input_cmd.
+ nb_virtual_ch_video_mode;
+ buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ TE_line_nb);
+ buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ TE_line_nb);
+ buffer[16] = adev->config.hdmi_video_input_cmd.TE_config;
+ buffer[17] = REG_32_8_MSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[18] = REG_32_8_MMSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[19] = REG_32_8_MLSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[20] = REG_32_8_LSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[21] = adev->config.hdmi_video_input_cmd.ui_x4;
+
+ *length = AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE - 1;
+ return 0;
+
+}
+
+static int configuration_audio_input_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_audio_input_cmd.audio_input_if_format;
+ buffer[1] = adev->config.hdmi_audio_input_cmd.i2s_input_nb;
+ buffer[2] = adev->config.hdmi_audio_input_cmd.sample_audio_freq;
+ buffer[3] = adev->config.hdmi_audio_input_cmd.audio_word_lg;
+ buffer[4] = adev->config.hdmi_audio_input_cmd.audio_format;
+ buffer[5] = adev->config.hdmi_audio_input_cmd.audio_if_mode;
+ buffer[6] = adev->config.hdmi_audio_input_cmd.audio_mute;
+
+ *length = AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_video_output_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_video_output_cmd.
+ video_output_cea_vesa;
+
+ if (buffer[0] == AV8100_CUSTOM) {
+ buffer[1] = adev->config.hdmi_video_output_cmd.
+ vsync_polarity;
+ buffer[2] = adev->config.hdmi_video_output_cmd.
+ hsync_polarity;
+ buffer[3] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_pixel);
+ buffer[4] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_pixel);
+ buffer[5] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_active_pixel);
+ buffer[6] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_active_pixel);
+ buffer[7] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_vertical_in_half_lines);
+ buffer[8] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_vertical_in_half_lines);
+ buffer[9] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.
+ total_vertical_active_in_half_lines);
+ buffer[10] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.
+ total_vertical_active_in_half_lines);
+ buffer[11] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hsync_start_in_pixel);
+ buffer[12] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hsync_start_in_pixel);
+ buffer[13] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hsync_length_in_pixel);
+ buffer[14] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hsync_length_in_pixel);
+ buffer[15] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vsync_start_in_half_line);
+ buffer[16] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vsync_start_in_half_line);
+ buffer[17] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vsync_length_in_half_line);
+ buffer[18] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vsync_length_in_half_line);
+ buffer[19] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hor_video_start_pixel);
+ buffer[20] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hor_video_start_pixel);
+ buffer[21] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vert_video_start_pixel);
+ buffer[22] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vert_video_start_pixel);
+ buffer[23] = adev->config.hdmi_video_output_cmd.video_type;
+ buffer[24] = adev->config.hdmi_video_output_cmd.pixel_repeat;
+ buffer[25] = REG_32_8_MSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[26] = REG_32_8_MMSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[27] = REG_32_8_MLSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[28] = REG_32_8_LSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+
+ *length = AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE - 1;
+ } else {
+ *length = 1;
+ }
+
+ return 0;
+}
+
+static int configuration_video_scaling_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_in_pixel);
+ buffer[1] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_in_pixel);
+ buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_in_pixel);
+ buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_in_pixel);
+ buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_in_line);
+ buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_in_line);
+ buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_in_line);
+ buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_in_line);
+ buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_out_pixel);
+ buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd
+ .h_start_out_pixel);
+ buffer[10] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_out_pixel);
+ buffer[11] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_out_pixel);
+ buffer[12] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_out_line);
+ buffer[13] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_out_line);
+ buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_out_line);
+ buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_out_line);
+
+ *length = AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_colorspace_conversion_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ const struct color_conversion_cmd *hdmi_color_space_conversion_cmd;
+
+ hdmi_color_space_conversion_cmd =
+ get_color_transform_cmd(adev, adev->config.color_transform);
+
+ buffer[0] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c0);
+ buffer[1] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c0);
+ buffer[2] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c1);
+ buffer[3] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c1);
+ buffer[4] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c2);
+ buffer[5] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c2);
+ buffer[6] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c3);
+ buffer[7] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c3);
+ buffer[8] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c4);
+ buffer[9] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c4);
+ buffer[10] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c5);
+ buffer[11] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c5);
+ buffer[12] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c6);
+ buffer[13] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c6);
+ buffer[14] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c7);
+ buffer[15] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c7);
+ buffer[16] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c8);
+ buffer[17] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c8);
+ buffer[18] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->aoffset);
+ buffer[19] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->aoffset);
+ buffer[20] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->boffset);
+ buffer[21] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->boffset);
+ buffer[22] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->coffset);
+ buffer[23] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->coffset);
+ buffer[24] = hdmi_color_space_conversion_cmd->lmax;
+ buffer[25] = hdmi_color_space_conversion_cmd->lmin;
+ buffer[26] = hdmi_color_space_conversion_cmd->cmax;
+ buffer[27] = hdmi_color_space_conversion_cmd->cmin;
+
+ *length = AV8100_COMMAND_COLORSPACECONVERSION_SIZE - 1;
+ return 0;
+}
+
+static int configuration_cec_message_write_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_cec_message_write_cmd.buffer_length;
+ memcpy(&buffer[1], adev->config.hdmi_cec_message_write_cmd.buffer,
+ adev->config.hdmi_cec_message_write_cmd.buffer_length);
+
+ *length = adev->config.hdmi_cec_message_write_cmd.buffer_length + 1;
+
+ return 0;
+}
+
+static int configuration_cec_message_read_get(char *buffer,
+ unsigned int *length)
+{
+ /* No buffer data */
+ *length = AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE - 1;
+ return 0;
+}
+
+static int configuration_denc_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_denc_cmd.cvbs_video_format;
+ buffer[1] = adev->config.hdmi_denc_cmd.standard_selection;
+ buffer[2] = adev->config.hdmi_denc_cmd.enable;
+ buffer[3] = adev->config.hdmi_denc_cmd.macrovision_enable;
+ buffer[4] = adev->config.hdmi_denc_cmd.internal_generator;
+
+ *length = AV8100_COMMAND_DENC_SIZE - 1;
+ return 0;
+}
+
+static int configuration_hdmi_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_cmd.hdmi_mode;
+ buffer[1] = adev->config.hdmi_cmd.hdmi_format;
+ buffer[2] = adev->config.hdmi_cmd.dvi_format;
+
+ *length = AV8100_COMMAND_HDMI_SIZE - 1;
+ return 0;
+}
+
+static int configuration_hdcp_sendkey_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_hdcp_send_key_cmd.key_number;
+ memcpy(&buffer[1], adev->config.hdmi_hdcp_send_key_cmd.data,
+ adev->config.hdmi_hdcp_send_key_cmd.data_len);
+
+ *length = adev->config.hdmi_hdcp_send_key_cmd.data_len + 1;
+ return 0;
+}
+
+static int configuration_hdcp_management_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_hdcp_management_format_cmd.req_type;
+ buffer[1] = adev->config.hdmi_hdcp_management_format_cmd.encr_use;
+
+ *length = AV8100_COMMAND_HDCP_MANAGEMENT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_infoframe_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_infoframes_cmd.type;
+ buffer[1] = adev->config.hdmi_infoframes_cmd.version;
+ buffer[2] = adev->config.hdmi_infoframes_cmd.length;
+ buffer[3] = adev->config.hdmi_infoframes_cmd.crc;
+ memcpy(&buffer[4], adev->config.hdmi_infoframes_cmd.data,
+ HDMI_INFOFRAME_DATA_SIZE);
+
+ *length = adev->config.hdmi_infoframes_cmd.length + 4;
+ return 0;
+}
+
+static int av8100_edid_section_readback_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_edid_section_readback_cmd.address;
+ buffer[1] = adev->config.hdmi_edid_section_readback_cmd.
+ block_number;
+
+ *length = AV8100_COMMAND_EDID_SECTION_READBACK_SIZE - 1;
+ return 0;
+}
+
+static int configuration_pattern_generator_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_pattern_generator_cmd.pattern_type;
+ buffer[1] = adev->config.hdmi_pattern_generator_cmd.
+ pattern_video_format;
+ buffer[2] = adev->config.hdmi_pattern_generator_cmd.
+ pattern_audio_mode;
+
+ *length = AV8100_COMMAND_PATTERNGENERATOR_SIZE - 1;
+ return 0;
+}
+
+static int configuration_fuse_aes_key_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_fuse_aes_key_cmd.fuse_operation;
+ if (adev->config.hdmi_fuse_aes_key_cmd.fuse_operation) {
+ /* Write key command */
+ memcpy(&buffer[1], adev->config.hdmi_fuse_aes_key_cmd.key,
+ HDMI_FUSE_AES_KEY_SIZE);
+
+ *length = AV8100_COMMAND_FUSE_AES_KEY_SIZE - 1;
+ } else {
+ /* Check key command */
+ *length = AV8100_COMMAND_FUSE_AES_CHK_SIZE - 1;
+ }
+ return 0;
+}
+
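+/*
+ * Checks the first command return byte: the chip is expected to echo the
+ * command type with bit 7 set.
+ */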
+static int get_command_return_first(struct i2c_client *i2c,
+ enum av8100_command_type command_type) {
+ int retval = 0;
+ char val;
+ struct device *dev = &i2c->dev;
+
+ retval = read_single_byte(i2c, AV8100_COMMAND_OFFSET, &val);
+ if (retval) {
+ dev_dbg(dev, "%s 1st ret failed\n", __func__);
+ return retval;
+ }
+
+ if (val != (0x80 | command_type)) {
+ dev_dbg(dev, "%s 1st ret wrong:%x\n", __func__, val);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
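+/*
+ * Reads and validates the return data of a previously issued command; for
+ * commands that return a payload (CEC read back, HDCP management, EDID
+ * section readback, fuse AES key, HDCP send key) the data is copied into
+ * buffer and its size into buffer_length.
+ */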
+static int get_command_return_data(struct i2c_client *i2c,
+ enum av8100_command_type command_type,
+ u8 *command_buffer,
+ u8 *buffer_length,
+ u8 *buffer)
+{
+ int retval = 0;
+ char val;
+ int index = 0;
+ struct device *dev = &i2c->dev;
+
+ if (buffer_length)
+ *buffer_length = 0;
+
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ case AV8100_COMMAND_DENC:
+ case AV8100_COMMAND_HDMI:
+ case AV8100_COMMAND_INFOFRAMES:
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the return buffer length */
+ retval = read_single_byte(i2c, AV8100_CEC_ADDR_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ dev_dbg(dev, "cec buflen:%d\n", val);
+ *buffer_length = val;
+
+ if (*buffer_length >
+ HDMI_CEC_READ_MAXSIZE) {
+ dev_dbg(dev, "CEC size too large %d\n",
+ *buffer_length);
+ *buffer_length = HDMI_CEC_READ_MAXSIZE;
+ }
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (index = 0; index < *buffer_length; ++index) {
+ retval = read_single_byte(i2c,
+ AV8100_CEC_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ }
+
+ dev_dbg(dev, "\n");
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ {
+ u8 nrdev;
+ u8 devcnt;
+ int cnt;
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval) {
+ goto get_command_return_data_fail2r;
+ } else {
+ /* Check the second return byte */
+ if (val)
+ goto get_command_return_data_fail2v;
+ }
+
+ if ((buffer == NULL) || (buffer_length == NULL))
+ /* Ignore return data */
+ break;
+
+ dev_dbg(dev, "req_type:%02x ", command_buffer[0]);
+
+ /* Check if revoc list data is requested */
+ if (command_buffer[0] !=
+ HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT) {
+ *buffer_length = 0;
+ break;
+ }
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+ }
+
+ /* Get Device count */
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &nrdev);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = nrdev;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+
+ /* Determine number of devices */
+ nrdev &= HDMI_HDCP_MGMT_DEVICE_MASK;
+ if (nrdev > HDMI_HDCP_MGMT_MAX_DEVICES_SIZE)
+ nrdev = HDMI_HDCP_MGMT_MAX_DEVICES_SIZE;
+
+ /* Get Bksv for each connected equipment */
+ for (devcnt = 0; devcnt < nrdev; devcnt++)
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index,
+ &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ",
+ *(buffer + index));
+ }
+ index++;
+ }
+
+ if (nrdev == 0)
+ goto hdcp_management_end;
+
+ /* Get SHA signature */
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_SHA_SIZE - 1; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+ }
+
+hdcp_management_end:
+ *buffer_length = index;
+
+ dev_dbg(dev, "\n");
+ }
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Return buffer length is fixed */
+ *buffer_length = HDMI_EDIDREAD_SIZE;
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (index = 0; index < *buffer_length; ++index) {
+ retval = read_single_byte(i2c,
+ AV8100_EDID_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ }
+
+ dev_dbg(dev, "\n");
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ /* Check the second return byte */
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ /* Return buffer length is fixed */
+ *buffer_length = HDMI_FUSE_AES_KEY_RET_SIZE;
+
+ /* Get CRC */
+ retval = read_single_byte(i2c,
+ AV8100_FUSE_CRC_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ *buffer = val;
+ dev_dbg(dev, "CRC:%02x ", val);
+
+ /* Get programmed status */
+ retval = read_single_byte(i2c,
+ AV8100_FUSE_PRGD_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ *(buffer + 1) = val;
+
+ dev_dbg(dev, "programmed:%02x ", val);
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ if ((command_buffer[0] == HDMI_LOADAES_END_BLK_NR) &&
+ ((buffer == NULL) || (buffer_length == NULL))) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ if (command_buffer[0] == HDMI_LOADAES_END_BLK_NR) {
+ /* Return CRC32 if last AES block */
+ int cnt;
+
+ dev_dbg(dev, "CRC32:");
+ for (cnt = 0; cnt < HDMI_CRC32_SIZE; cnt++) {
+ if (read_single_byte(i2c,
+ AV8100_CRC32_OFFSET + cnt, &val))
+ goto get_command_return_data_fail;
+ *(buffer + cnt) = val;
+ dev_dbg(dev, "%02x", val);
+ }
+
+ *buffer_length = HDMI_CRC32_SIZE;
+ }
+ break;
+
+ default:
+ retval = -EFAULT;
+ break;
+ }
+
+ return retval;
+get_command_return_data_fail2r:
+ dev_dbg(dev, "%s Reading 2nd return byte failed\n", __func__);
+ return retval;
+get_command_return_data_fail2v:
+ dev_dbg(dev, "%s 2nd return byte is wrong:%x\n", __func__, val);
+ return retval;
+get_command_return_data_fail:
+ dev_dbg(dev, "%s FAIL\n", __func__);
+ return retval;
+}
+
+static int av8100_powerup1(struct av8100_device *adev)
+{
+ int retval;
+ struct av8100_platform_data *pdata = adev->dev->platform_data;
+
+ /* Regulator enable */
+ if ((adev->params.regulator_pwr) &&
+ (adev->params.regulator_requested == false)) {
+ retval = regulator_enable(adev->params.regulator_pwr);
+ if (retval < 0) {
+ dev_warn(adev->dev, "%s: regulator_enable failed\n",
+ __func__);
+ return retval;
+ }
+ dev_dbg(adev->dev, "regulator_enable ok\n");
+ adev->params.regulator_requested = true;
+ }
+
+ /* Reset av8100 */
+ gpio_set_value_cansleep(pdata->reset, 1);
+
+ /* Need to wait before proceeding */
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_STANDBY);
+
+ if (pdata->alt_powerupseq) {
+ dev_dbg(adev->dev, "powerup seq alt\n");
+ retval = av8100_5V_w(0, 0, AV8100_ON_TIME);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 1\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ udelay(AV8100_WATTIME_100US);
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 2\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ udelay(AV8100_WATTIME_100US);
+
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 3\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_LOW, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 4\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 5\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+ }
+
+ /* Get chip version */
+ retval = av8100_reg_stby_pend_int_r(NULL, NULL, NULL,
+ &adev->chip_version);
+ if (retval) {
+ dev_err(adev->dev, "Failed to read chip version\n");
+ goto av8100_powerup1_err;
+ }
+
+ dev_info(adev->dev, "chip version:%d\n", adev->chip_version);
+
+ switch (adev->chip_version) {
+ case AV8100_CHIPVER_1:
+ case AV8100_CHIPVER_2:
+ break;
+
+ default:
+ dev_err(adev->dev, "Unsupported chip version:%d\n",
+ adev->chip_version);
+ goto av8100_powerup1_err;
+ break;
+ }
+
+ return 0;
+
+av8100_powerup1_err:
+ av8100_powerdown();
+ return -EFAULT;
+}
+
+static int av8100_powerup2(struct av8100_device *adev)
+{
+ int retval;
+
+ /* ON time & OFF time on 5v HDMI plug detect */
+ retval = av8100_5V_w(adev->params.denc_off_time,
+ adev->params.hdmi_off_time,
+ adev->params.on_time);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return retval;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_SCAN);
+
+ return 0;
+}
+
+static int register_read_internal(u8 offset, u8 *value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ i2c = adev->config.client;
+
+ /* Read from register */
+ retval = read_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ return -EFAULT;
+ }
+
+ return retval;
+}
+
+static int register_write_internal(u8 offset, u8 value)
+{
+ int retval;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ i2c = adev->config.client;
+
+ /* Write to register */
+ retval = write_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int av8100_powerscan(void)
+{
+ int retval;
+ struct av8100_device *adev;
+ struct av8100_platform_data *pdata;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ pdata = adev->dev->platform_data;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (av8100_status_get().av8100_state > AV8100_OPMODE_SCAN) {
+ dev_dbg(adev->dev, "set to scan mode\n");
+
+ av8100_disable_interrupt();
+
+ /* Stby mode */
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_LOW, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write to av8100 register\n");
+ return retval;
+ }
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk &&
+ adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_enable_interrupt();
+
+ av8100_set_state(adev, AV8100_OPMODE_SCAN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_powerscan);
+
+int av8100_powerup(void)
+{
+ int ret = 0;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state < AV8100_OPMODE_STANDBY) {
+ ret = av8100_powerup1(adev);
+ if (ret) {
+ dev_err(adev->dev, "av8100_powerup1 fail\n");
+ return -EFAULT;
+ }
+ }
+
+ if (av8100_status_get().av8100_state < AV8100_OPMODE_SCAN)
+ ret = av8100_powerup2(adev);
+
+ av8100_enable_interrupt();
+
+ return ret;
+}
+EXPORT_SYMBOL(av8100_powerup);
+
+int av8100_powerdown(void)
+{
+ int retval = 0;
+ struct av8100_device *adev;
+ struct av8100_platform_data *pdata;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ pdata = adev->dev->platform_data;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ goto av8100_powerdown_end;
+
+ av8100_disable_interrupt();
+
+ if (pdata->alt_powerupseq) {
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH);
+
+ if (retval)
+ dev_err(adev->dev, "%s reg_wr err\n", __func__);
+ msleep(AV8100_WAITTIME_50MS);
+ }
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk && adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+ /* Regulator disable */
+ if ((adev->params.regulator_pwr) &&
+ (adev->params.regulator_requested)) {
+ dev_dbg(adev->dev, "regulator_disable\n");
+ regulator_disable(adev->params.regulator_pwr);
+ adev->params.regulator_requested = false;
+ }
+
+ gpio_set_value_cansleep(pdata->reset, 0);
+
+ if (pdata->alt_powerupseq)
+ mdelay(AV8100_WAITTIME_5MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN);
+
+av8100_powerdown_end:
+ return retval;
+}
+EXPORT_SYMBOL(av8100_powerdown);
+
+int av8100_download_firmware(enum interface_type if_type)
+{
+ int retval;
+ int temp = 0x0;
+ int increment = 15;
+ int index = 0;
+ int size = 0x0;
+	u8 val = 0x0;
+	u8 checksum = 0;
+ int cnt;
+ int cnt_max;
+ struct i2c_client *i2c;
+ u8 uc;
+ u8 fdl;
+ u8 hld;
+ u8 wa;
+ u8 ra;
+ struct av8100_platform_data *pdata;
+ const struct firmware *fw_file;
+ u8 *fw_buff;
+ int fw_bytes;
+ struct av8100_device *adev;
+ struct av8100_status status;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ LOCK_AV8100_FWDL;
+
+ status = av8100_status_get();
+ if (status.av8100_state <= AV8100_OPMODE_SHUTDOWN) {
+ retval = -EINVAL;
+ goto av8100_download_firmware_err2;
+ }
+
+ if (status.av8100_state >= AV8100_OPMODE_INIT) {
+ dev_dbg(adev->dev, "FW already ok\n");
+ retval = 0;
+ goto av8100_download_firmware_err2;
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_INIT);
+
+ pdata = adev->dev->platform_data;
+
+ /* Request firmware */
+ if (request_firmware(&fw_file,
+ AV8100_FW_FILENAME,
+ adev->dev)) {
+ dev_err(adev->dev, "fw request failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err2;
+ }
+
+ /* Master clock timing, running */
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ goto av8100_download_firmware_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ /* Clock enable */
+ if (adev->params.inputclk &&
+ adev->params.inputclk_requested == false) {
+ if (clk_enable(adev->params.inputclk)) {
+ dev_err(adev->dev, "inputclk en failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ adev->params.inputclk_requested = true;
+ }
+
+ /* Request 100% APE OPP */
+ if (adev->params.opp_requested == false) {
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name, 100)) {
+ dev_err(adev->dev, "APE OPP 100 failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name, 100)) {
+ dev_err(adev->dev, "DDR OPP 100 failed\n");
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ adev->params.opp_requested = true;
+ }
+
+ msleep(AV8100_WAITTIME_10MS);
+
+ /* Prepare firmware data */
+ fw_bytes = fw_file->size;
+ fw_buff = (u8 *)fw_file->data;
+ dev_dbg(adev->dev, "fw size:%d\n", fw_bytes);
+
+ i2c = adev->config.client;
+
+ /* Enable firmware download */
+ retval = av8100_reg_gen_ctrl_w(
+ AV8100_GENERAL_CONTROL_FDL_HIGH,
+ AV8100_GENERAL_CONTROL_HLD_HIGH,
+ AV8100_GENERAL_CONTROL_WA_LOW,
+ AV8100_GENERAL_CONTROL_RA_LOW);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ retval = av8100_reg_gen_ctrl_r(&fdl, &hld, &wa, &ra);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ } else {
+ dev_dbg(adev->dev, "GENERAL_CONTROL_REG register fdl:%d "
+ "hld:%d wa:%d ra:%d\n", fdl, hld, wa, ra);
+ }
+
+ LOCK_AV8100_HW;
+
+ temp = fw_bytes % increment;
+ for (size = 0; size < (fw_bytes-temp); size = size + increment,
+ index += increment) {
+ if (if_type == I2C_INTERFACE) {
+ retval = write_multi_byte(i2c,
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size,
+ increment);
+ if (retval) {
+ dev_dbg(adev->dev, "Failed to download the "
+ "av8100 firmware\n");
+ UNLOCK_AV8100_HW;
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ } else if (if_type == DSI_INTERFACE) {
+ dev_dbg(adev->dev,
+ "DSI_INTERFACE is currently not supported\n");
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ } else {
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ }
+ }
+
+ /* Transfer last firmware bytes */
+ if (if_type == I2C_INTERFACE) {
+ retval = write_multi_byte(i2c,
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size, temp);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to download the av8100 firmware\n");
+ UNLOCK_AV8100_HW;
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ } else if (if_type == DSI_INTERFACE) {
+ /* TODO: Add support for DSI firmware download */
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ } else {
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ }
+
+	/* Check the transfer by XOR-ing the firmware bytes into a checksum */
+ for (size = 0; size < fw_bytes; size++)
+ checksum = checksum ^ fw_buff[size];
+
+ UNLOCK_AV8100_HW;
+
+ retval = av8100_reg_fw_dl_entry_r(&val);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ dev_dbg(adev->dev, "checksum:%x,val:%x\n", checksum, val);
+
+ if (checksum != val) {
+ dev_dbg(adev->dev,
+ ">Fw downloading.... FAIL checksum issue\n");
+ dev_dbg(adev->dev, "checksum = %d\n", checksum);
+ dev_dbg(adev->dev, "checksum read: %d\n", val);
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ } else {
+ dev_dbg(adev->dev, ">Fw downloading.... success\n");
+ }
+
+ /* Set to idle mode */
+	retval = av8100_reg_gen_ctrl_w(AV8100_GENERAL_CONTROL_FDL_LOW,
+ AV8100_GENERAL_CONTROL_HLD_LOW, AV8100_GENERAL_CONTROL_WA_LOW,
+ AV8100_GENERAL_CONTROL_RA_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+	/* Wait for the internal microcontroller to become ready */
+ cnt = 0;
+	cnt_max = ARRAY_SIZE(waittime_retry);
+ retval = av8100_reg_gen_status_r(NULL, NULL, NULL, &uc,
+ NULL, NULL);
+ while ((retval == 0) && (uc != 0x1) && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = av8100_reg_gen_status_r(NULL, NULL, NULL,
+ &uc, NULL, NULL);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "av8100 fwdl cnt:%d\n", cnt);
+
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ if (uc != 0x1)
+ dev_dbg(adev->dev, "UC is not ready\n");
+
+ release_firmware(fw_file);
+
+ if (adev->chip_version != 1) {
+ char *cut_str;
+
+ /* Get cut version */
+ retval = read_single_byte(i2c, AV8100_CUTVER_OFFSET, &val);
+ if (retval) {
+ dev_err(adev->dev, "Read cut ver failed\n");
+			goto av8100_download_firmware_err2;
+ }
+
+ switch (val) {
+ case 0x00:
+ cut_str = CUT_STR_0;
+ break;
+ case 0x01:
+ cut_str = CUT_STR_1;
+ break;
+ case 0x03:
+ cut_str = CUT_STR_3;
+ break;
+ case 0x30:
+ cut_str = CUT_STR_30;
+ break;
+ default:
+ cut_str = CUT_STR_UNKNOWN;
+ break;
+ }
+ dev_dbg(adev->dev, "Cut ver %d %s\n", val, cut_str);
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_IDLE);
+
+ UNLOCK_AV8100_FWDL;
+ return 0;
+
+av8100_download_firmware_err:
+ release_firmware(fw_file);
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk && adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+av8100_download_firmware_err2:
+ UNLOCK_AV8100_FWDL;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_download_firmware);
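+
+/*
+ * Illustrative bring-up sketch (not part of the driver): a client such as a
+ * display driver would typically power the chip and load its firmware before
+ * issuing configuration commands. Exact call sites depend on the platform.
+ *
+ *	if (av8100_powerup())
+ *		return -EFAULT;
+ *	if (av8100_download_firmware(I2C_INTERFACE)) {
+ *		av8100_powerdown();
+ *		return -EFAULT;
+ *	}
+ *	... issue av8100_conf_prep()/av8100_conf_w() commands ...
+ *	av8100_powerdown();
+ */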
+
+int av8100_disable_interrupt(void)
+{
+ int retval;
+ u8 hpdm = 0;
+ u8 cpdm = 0;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (!adev->params.ints_enabled)
+ return 0;
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_gen_int_mask_w(
+ AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
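+	/*
+	 * Note: av8100_reg_stby_int_mask_w() below overwrites params.hpdm and
+	 * params.cpdm, so the current mask settings are saved here and
+	 * restored once the interrupts have been masked.
+	 */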
+ hpdm = adev->params.hpdm;
+ cpdm = adev->params.cpdm;
+
+ retval = av8100_reg_stby_int_mask_w(
+ AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW,
+ AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ adev->params.hpdm = hpdm;
+ adev->params.cpdm = cpdm;
+ adev->params.ints_enabled = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_disable_interrupt);
+
+int av8100_enable_interrupt(void)
+{
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (adev->params.ints_enabled)
+ return 0;
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_gen_int_mask_w(
+ AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW,
+ adev->params.cecm,
+ adev->params.hdcpm,
+ adev->params.uovbm,
+ AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_stby_int_mask_w(
+ adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ adev->params.ints_enabled = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_enable_interrupt);
+
+int av8100_reg_stby_w(
+ u8 cpd, u8 stby, u8 mclkrng)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_CPD(cpd) | AV8100_STANDBY_STBY(stby) |
+ AV8100_STANDBY_MCLKRNG(mclkrng);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_w);
+
+static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on)
+{
+ u8 val;
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+	/* Set register value.
+	 * chip_version == 1 has one common off time;
+	 * chip_version > 1 supports separate off times for hdmi and tvout. */
+ if (adev->chip_version == 1)
+ val = AV8100_HDMI_5_VOLT_TIME_OFF_TIME(hdmi_off) |
+ AV8100_HDMI_5_VOLT_TIME_ON_TIME(on);
+ else
+ val = AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(denc_off) |
+ AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(hdmi_off) |
+ AV8100_HDMI_5_VOLT_TIME_ON_TIME(on);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_HDMI_5_VOLT_TIME, val);
+
+ UNLOCK_AV8100_HW;
+
+ return retval;
+}
+
+int av8100_reg_hdmi_5_volt_time_w(u8 denc_off, u8 hdmi_off, u8 on)
+{
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ retval = av8100_5V_w(denc_off, hdmi_off, on);
+
+ /* Set vars */
+ if (adev->chip_version > 1)
+ adev->params.denc_off_time = denc_off;
+
+ adev->params.hdmi_off_time = hdmi_off;
+ if (on)
+ adev->params.on_time = on;
+
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_w);
+
+int av8100_reg_stby_int_mask_w(
+ u8 hpdm, u8 cpdm, u8 stbygpiocfg, u8 ipol)
+{
+ int retval;
+ u8 val;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_INTERRUPT_MASK_HPDM(hpdm) |
+ AV8100_STANDBY_INTERRUPT_MASK_CPDM(cpdm) |
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(stbygpiocfg) |
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL(ipol);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY_INTERRUPT_MASK, val);
+
+ adev->params.hpdm = hpdm;
+ adev->params.cpdm = cpdm;
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_int_mask_w);
+
+int av8100_reg_stby_pend_int_w(
+ u8 hpdi, u8 cpdi, u8 oni, u8 bpdig)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_PENDING_INTERRUPT_HPDI(hpdi) |
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI(cpdi) |
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI(oni) |
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(bpdig);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY_PENDING_INTERRUPT, val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_pend_int_w);
+
+int av8100_reg_gen_int_mask_w(
+ u8 eocm, u8 vsim, u8 vsom, u8 cecm, u8 hdcpm, u8 uovbm, u8 tem)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_INTERRUPT_MASK_EOCM(eocm) |
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM(vsim) |
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM(vsom) |
+ AV8100_GENERAL_INTERRUPT_MASK_CECM(cecm) |
+ AV8100_GENERAL_INTERRUPT_MASK_HDCPM(hdcpm) |
+ AV8100_GENERAL_INTERRUPT_MASK_UOVBM(uovbm) |
+ AV8100_GENERAL_INTERRUPT_MASK_TEM(tem);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_INTERRUPT_MASK, val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_mask_w);
+
+int av8100_reg_gen_int_w(
+ u8 eoci, u8 vsii, u8 vsoi, u8 ceci, u8 hdcpi, u8 uovbi)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_INTERRUPT_EOCI(eoci) |
+ AV8100_GENERAL_INTERRUPT_VSII(vsii) |
+ AV8100_GENERAL_INTERRUPT_VSOI(vsoi) |
+ AV8100_GENERAL_INTERRUPT_CECI(ceci) |
+ AV8100_GENERAL_INTERRUPT_HDCPI(hdcpi) |
+ AV8100_GENERAL_INTERRUPT_UOVBI(uovbi);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_INTERRUPT, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_w);
+
+int av8100_reg_gpio_conf_w(
+ u8 dat3dir, u8 dat3val, u8 dat2dir, u8 dat2val, u8 dat1dir,
+ u8 dat1val, u8 ucdbg)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GPIO_CONFIGURATION_DAT3DIR(dat3dir) |
+ AV8100_GPIO_CONFIGURATION_DAT3VAL(dat3val) |
+ AV8100_GPIO_CONFIGURATION_DAT2DIR(dat2dir) |
+ AV8100_GPIO_CONFIGURATION_DAT2VAL(dat2val) |
+ AV8100_GPIO_CONFIGURATION_DAT1DIR(dat1dir) |
+ AV8100_GPIO_CONFIGURATION_DAT1VAL(dat1val) |
+ AV8100_GPIO_CONFIGURATION_UCDBG(ucdbg);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GPIO_CONFIGURATION, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gpio_conf_w);
+
+int av8100_reg_gen_ctrl_w(
+ u8 fdl, u8 hld, u8 wa, u8 ra)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_CONTROL_FDL(fdl) |
+ AV8100_GENERAL_CONTROL_HLD(hld) |
+ AV8100_GENERAL_CONTROL_WA(wa) |
+ AV8100_GENERAL_CONTROL_RA(ra);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_CONTROL, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_ctrl_w);
+
+int av8100_reg_fw_dl_entry_w(
+ u8 mbyte_code_entry)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(
+ mbyte_code_entry);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_fw_dl_entry_w);
+
+int av8100_reg_w(
+ u8 offset, u8 value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ i2c = adev->config.client;
+
+ /* Write to register */
+ retval = write_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ UNLOCK_AV8100_HW;
+ return -EFAULT;
+ }
+
+ UNLOCK_AV8100_HW;
+ return 0;
+}
+EXPORT_SYMBOL(av8100_reg_w);
+
+int av8100_reg_stby_r(
+ u8 *cpd, u8 *stby, u8 *hpds, u8 *cpds, u8 *mclkrng)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY, &val);
+
+ /* Set return params */
+ if (cpd)
+ *cpd = AV8100_STANDBY_CPD_GET(val);
+ if (stby)
+ *stby = AV8100_STANDBY_STBY_GET(val);
+ if (hpds)
+ *hpds = AV8100_STANDBY_HPDS_GET(val);
+ if (cpds)
+ *cpds = AV8100_STANDBY_CPDS_GET(val);
+ if (mclkrng)
+ *mclkrng = AV8100_STANDBY_MCLKRNG_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_r);
+
+int av8100_reg_hdmi_5_volt_time_r(
+ u8 *denc_off_time, u8 *hdmi_off_time, u8 *on_time)
+{
+ int retval;
+ u8 val;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_HDMI_5_VOLT_TIME, &val);
+
+ /* Set return params */
+ if (adev->chip_version == 1) {
+ if (denc_off_time)
+ *denc_off_time = 0;
+ if (hdmi_off_time)
+ *hdmi_off_time =
+ AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(val);
+ } else {
+ if (denc_off_time)
+ *denc_off_time =
+ AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(val);
+ if (hdmi_off_time)
+ *hdmi_off_time =
+ AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(val);
+ }
+
+ if (on_time)
+ *on_time = AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_r);
+
+int av8100_reg_stby_int_mask_r(
+ u8 *hpdm, u8 *cpdm, u8 *stbygpiocfg, u8 *ipol)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY_INTERRUPT_MASK, &val);
+
+ /* Set return params */
+ if (hpdm)
+ *hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(val);
+ if (cpdm)
+ *cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(val);
+ if (stbygpiocfg)
+ *stbygpiocfg =
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(val);
+ if (ipol)
+ *ipol = AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_int_mask_r);
+
+int av8100_reg_stby_pend_int_r(
+ u8 *hpdi, u8 *cpdi, u8 *oni, u8 *sid)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY_PENDING_INTERRUPT,
+ &val);
+
+ /* Set return params */
+ if (hpdi)
+ *hpdi = AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(val);
+ if (cpdi)
+ *cpdi = AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(val);
+ if (oni)
+ *oni = AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(val);
+ if (sid)
+ *sid = AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_pend_int_r);
+
+int av8100_reg_gen_int_mask_r(
+ u8 *eocm,
+ u8 *vsim,
+ u8 *vsom,
+ u8 *cecm,
+ u8 *hdcpm,
+ u8 *uovbm,
+ u8 *tem)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_INTERRUPT_MASK, &val);
+
+ /* Set return params */
+ if (eocm)
+ *eocm = AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(val);
+ if (vsim)
+ *vsim = AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(val);
+ if (vsom)
+ *vsom = AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(val);
+ if (cecm)
+ *cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(val);
+ if (hdcpm)
+ *hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(val);
+ if (uovbm)
+ *uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(val);
+ if (tem)
+ *tem = AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_mask_r);
+
+int av8100_reg_gen_int_r(
+ u8 *eoci,
+ u8 *vsii,
+ u8 *vsoi,
+ u8 *ceci,
+ u8 *hdcpi,
+ u8 *uovbi,
+ u8 *tei)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_INTERRUPT, &val);
+
+ /* Set return params */
+ if (eoci)
+ *eoci = AV8100_GENERAL_INTERRUPT_EOCI_GET(val);
+ if (vsii)
+ *vsii = AV8100_GENERAL_INTERRUPT_VSII_GET(val);
+ if (vsoi)
+ *vsoi = AV8100_GENERAL_INTERRUPT_VSOI_GET(val);
+ if (ceci)
+ *ceci = AV8100_GENERAL_INTERRUPT_CECI_GET(val);
+ if (hdcpi)
+ *hdcpi = AV8100_GENERAL_INTERRUPT_HDCPI_GET(val);
+ if (uovbi)
+ *uovbi = AV8100_GENERAL_INTERRUPT_UOVBI_GET(val);
+ if (tei)
+ *tei = AV8100_GENERAL_INTERRUPT_TEI_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_r);
+
+int av8100_reg_gen_status_r(
+ u8 *cectxerr,
+ u8 *cecrec,
+ u8 *cectrx,
+ u8 *uc,
+ u8 *onuvb,
+ u8 *hdcps)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_STATUS, &val);
+
+ /* Set return params */
+ if (cectxerr)
+ *cectxerr = AV8100_GENERAL_STATUS_CECTXERR_GET(val);
+ if (cecrec)
+ *cecrec = AV8100_GENERAL_STATUS_CECREC_GET(val);
+ if (cectrx)
+ *cectrx = AV8100_GENERAL_STATUS_CECTRX_GET(val);
+ if (uc)
+ *uc = AV8100_GENERAL_STATUS_UC_GET(val);
+ if (onuvb)
+ *onuvb = AV8100_GENERAL_STATUS_ONUVB_GET(val);
+ if (hdcps)
+ *hdcps = AV8100_GENERAL_STATUS_HDCPS_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_status_r);
+
+int av8100_reg_gpio_conf_r(
+ u8 *dat3dir,
+ u8 *dat3val,
+ u8 *dat2dir,
+ u8 *dat2val,
+ u8 *dat1dir,
+ u8 *dat1val,
+ u8 *ucdbg)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GPIO_CONFIGURATION, &val);
+
+ /* Set return params */
+ if (dat3dir)
+ *dat3dir = AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(val);
+ if (dat3val)
+ *dat3val = AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(val);
+ if (dat2dir)
+ *dat2dir = AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(val);
+ if (dat2val)
+ *dat2val = AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(val);
+ if (dat1dir)
+ *dat1dir = AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(val);
+ if (dat1val)
+ *dat1val = AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(val);
+ if (ucdbg)
+ *ucdbg = AV8100_GPIO_CONFIGURATION_UCDBG_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gpio_conf_r);
+
+int av8100_reg_gen_ctrl_r(
+ u8 *fdl,
+ u8 *hld,
+ u8 *wa,
+ u8 *ra)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_CONTROL, &val);
+ /* Set return params */
+ if (fdl)
+ *fdl = AV8100_GENERAL_CONTROL_FDL_GET(val);
+ if (hld)
+ *hld = AV8100_GENERAL_CONTROL_HLD_GET(val);
+ if (wa)
+ *wa = AV8100_GENERAL_CONTROL_WA_GET(val);
+ if (ra)
+ *ra = AV8100_GENERAL_CONTROL_RA_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_ctrl_r);
+
+int av8100_reg_fw_dl_entry_r(
+ u8 *mbyte_code_entry)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, &val);
+
+ /* Set return params */
+ if (mbyte_code_entry)
+ *mbyte_code_entry =
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_fw_dl_entry_r);
+
+int av8100_reg_r(
+ u8 offset,
+ u8 *value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ i2c = adev->config.client;
+
+ /* Read from register */
+ retval = read_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_register_read_out;
+ }
+
+av8100_register_read_out:
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_r);
+
+int av8100_conf_get(enum av8100_command_type command_type,
+ union av8100_configuration *config)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+	/* Copy the stored configuration data into the caller's struct,
+	 * depending on command type */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ memcpy(&config->video_input_format,
+ &adev->config.hdmi_video_input_cmd,
+ sizeof(struct av8100_video_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ memcpy(&config->audio_input_format,
+ &adev->config.hdmi_audio_input_cmd,
+ sizeof(struct av8100_audio_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ memcpy(&config->video_output_format,
+ &adev->config.hdmi_video_output_cmd,
+ sizeof(struct av8100_video_output_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ memcpy(&config->video_scaling_format,
+ &adev->config.hdmi_video_scaling_cmd,
+ sizeof(struct av8100_video_scaling_format_cmd));
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ config->color_transform = adev->config.color_transform;
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ memcpy(&config->cec_message_write_format,
+ &adev->config.hdmi_cec_message_write_cmd,
+ sizeof(struct av8100_cec_message_write_format_cmd));
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ memcpy(&config->cec_message_read_back_format,
+ &adev->config.hdmi_cec_message_read_back_cmd,
+ sizeof(struct av8100_cec_message_read_back_format_cmd));
+ break;
+
+ case AV8100_COMMAND_DENC:
+ memcpy(&config->denc_format, &adev->config.hdmi_denc_cmd,
+ sizeof(struct av8100_denc_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ memcpy(&config->hdmi_format, &adev->config.hdmi_cmd,
+ sizeof(struct av8100_hdmi_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ memcpy(&config->hdcp_send_key_format,
+ &adev->config.hdmi_hdcp_send_key_cmd,
+ sizeof(struct av8100_hdcp_send_key_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ memcpy(&config->hdcp_management_format,
+ &adev->config.hdmi_hdcp_management_format_cmd,
+ sizeof(struct av8100_hdcp_management_format_cmd));
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ memcpy(&config->infoframes_format,
+ &adev->config.hdmi_infoframes_cmd,
+ sizeof(struct av8100_infoframes_format_cmd));
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ memcpy(&config->edid_section_readback_format,
+ &adev->config.hdmi_edid_section_readback_cmd,
+ sizeof(struct
+ av8100_edid_section_readback_format_cmd));
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ memcpy(&config->pattern_generator_format,
+ &adev->config.hdmi_pattern_generator_cmd,
+ sizeof(struct av8100_pattern_generator_format_cmd));
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ memcpy(&config->fuse_aes_key_format,
+ &adev->config.hdmi_fuse_aes_key_cmd,
+ sizeof(struct av8100_fuse_aes_key_format_cmd));
+ break;
+
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_conf_get);
+
+int av8100_conf_prep(enum av8100_command_type command_type,
+ union av8100_configuration *config)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!config || !adev)
+ return -EINVAL;
+
+	/* Copy the caller's configuration data into the corresponding stored
+	 * struct, depending on command type */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ memcpy(&adev->config.hdmi_video_input_cmd,
+ &config->video_input_format,
+ sizeof(struct av8100_video_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ memcpy(&adev->config.hdmi_audio_input_cmd,
+ &config->audio_input_format,
+ sizeof(struct av8100_audio_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ memcpy(&adev->config.hdmi_video_output_cmd,
+ &config->video_output_format,
+ sizeof(struct av8100_video_output_format_cmd));
+
+ /* Set params that depend on video output */
+ av8100_config_video_output_dep(adev->config.
+ hdmi_video_output_cmd.video_output_cea_vesa);
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ memcpy(&adev->config.hdmi_video_scaling_cmd,
+ &config->video_scaling_format,
+ sizeof(struct av8100_video_scaling_format_cmd));
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ adev->config.color_transform = config->color_transform;
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ memcpy(&adev->config.hdmi_cec_message_write_cmd,
+ &config->cec_message_write_format,
+ sizeof(struct av8100_cec_message_write_format_cmd));
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ memcpy(&adev->config.hdmi_cec_message_read_back_cmd,
+ &config->cec_message_read_back_format,
+ sizeof(struct av8100_cec_message_read_back_format_cmd));
+ break;
+
+ case AV8100_COMMAND_DENC:
+ memcpy(&adev->config.hdmi_denc_cmd, &config->denc_format,
+ sizeof(struct av8100_denc_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ memcpy(&adev->config.hdmi_cmd, &config->hdmi_format,
+ sizeof(struct av8100_hdmi_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ memcpy(&adev->config.hdmi_hdcp_send_key_cmd,
+ &config->hdcp_send_key_format,
+ sizeof(struct av8100_hdcp_send_key_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ memcpy(&adev->config.hdmi_hdcp_management_format_cmd,
+ &config->hdcp_management_format,
+ sizeof(struct av8100_hdcp_management_format_cmd));
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ memcpy(&adev->config.hdmi_infoframes_cmd,
+ &config->infoframes_format,
+ sizeof(struct av8100_infoframes_format_cmd));
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ memcpy(&adev->config.hdmi_edid_section_readback_cmd,
+ &config->edid_section_readback_format,
+ sizeof(struct
+ av8100_edid_section_readback_format_cmd));
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ memcpy(&adev->config.hdmi_pattern_generator_cmd,
+ &config->pattern_generator_format,
+ sizeof(struct av8100_pattern_generator_format_cmd));
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ memcpy(&adev->config.hdmi_fuse_aes_key_cmd,
+ &config->fuse_aes_key_format,
+ sizeof(struct av8100_fuse_aes_key_format_cmd));
+ break;
+
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_conf_prep);
+
+int av8100_conf_w(enum av8100_command_type command_type,
+ u8 *return_buffer_length,
+ u8 *return_buffer, enum interface_type if_type)
+{
+ int retval = 0;
+ u8 cmd_buffer[AV8100_COMMAND_MAX_LENGTH];
+ u32 cmd_length = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (return_buffer_length)
+ *return_buffer_length = 0;
+
+ i2c = adev->config.client;
+
+ memset(&cmd_buffer, 0x00, AV8100_COMMAND_MAX_LENGTH);
+
+#define PRNK_MODE(_m) dev_dbg(adev->dev, "cmd: " #_m "\n")
+
+ /* Fill the command buffer with configuration data */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_INPUT_FORMAT);
+ configuration_video_input_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_AUDIO_INPUT_FORMAT);
+ configuration_audio_input_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT);
+ configuration_video_output_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_SCALING_FORMAT);
+ configuration_video_scaling_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ PRNK_MODE(AV8100_COMMAND_COLORSPACECONVERSION);
+ configuration_colorspace_conversion_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_WRITE);
+ configuration_cec_message_write_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_READ_BACK);
+ configuration_cec_message_read_get(cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_DENC:
+ PRNK_MODE(AV8100_COMMAND_DENC);
+ configuration_denc_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ PRNK_MODE(AV8100_COMMAND_HDMI);
+ configuration_hdmi_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ PRNK_MODE(AV8100_COMMAND_HDCP_SENDKEY);
+ configuration_hdcp_sendkey_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ PRNK_MODE(AV8100_COMMAND_HDCP_MANAGEMENT);
+ configuration_hdcp_management_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ PRNK_MODE(AV8100_COMMAND_INFOFRAMES);
+ configuration_infoframe_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ PRNK_MODE(AV8100_COMMAND_EDID_SECTION_READBACK);
+ av8100_edid_section_readback_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ PRNK_MODE(AV8100_COMMAND_PATTERNGENERATOR);
+ configuration_pattern_generator_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ PRNK_MODE(AV8100_COMMAND_FUSE_AES_KEY);
+ configuration_fuse_aes_key_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ default:
+ dev_dbg(adev->dev, "Invalid command type\n");
+ retval = -EFAULT;
+ break;
+	}
+
+	if (retval)
+		return retval;
+
+	LOCK_AV8100_HW;
+
+ if (if_type == I2C_INTERFACE) {
+ int cnt = 0;
+ int cnt_max;
+
+ dev_dbg(adev->dev, "av8100_conf_w cmd_type:%02x length:%02x ",
+ command_type, cmd_length);
+ dev_dbg(adev->dev, "buffer: ");
+ while (cnt < cmd_length) {
+ dev_dbg(adev->dev, "%02x ", cmd_buffer[cnt]);
+ cnt++;
+ }
+
+ /* Write the command buffer */
+ retval = write_multi_byte(i2c,
+ AV8100_CMD_BUF_OFFSET, cmd_buffer, cmd_length);
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ /* Write the command */
+ retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET,
+ command_type);
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+
+ /* Get the first return byte */
+ mdelay(AV8100_WAITTIME_1MS);
+ cnt = 0;
+		cnt_max = ARRAY_SIZE(waittime_retry);
+ retval = get_command_return_first(i2c, command_type);
+ while (retval && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = get_command_return_first(i2c, command_type);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "first return cnt:%d\n", cnt);
+
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ retval = get_command_return_data(i2c, command_type, cmd_buffer,
+ return_buffer_length, return_buffer);
+ } else if (if_type == DSI_INTERFACE) {
+ /* TODO */
+ } else {
+ retval = -EINVAL;
+		dev_dbg(adev->dev, "Invalid interface type\n");
+ }
+
+ if (command_type == AV8100_COMMAND_HDMI) {
+ adev->status.hdmi_on = ((adev->config.hdmi_cmd.
+ hdmi_mode == AV8100_HDMI_ON) &&
+ (adev->config.hdmi_cmd.hdmi_format == AV8100_HDMI));
+ }
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_conf_w);
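+
+/*
+ * Illustrative usage sketch (not part of the driver): a configuration is
+ * normally prepared with av8100_conf_prep() and then committed with
+ * av8100_conf_w(), e.g. for the HDMI command:
+ *
+ *	union av8100_configuration config;
+ *
+ *	av8100_conf_get(AV8100_COMMAND_HDMI, &config);
+ *	config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ *	av8100_conf_prep(AV8100_COMMAND_HDMI, &config);
+ *	av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL, I2C_INTERFACE);
+ */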
+
+int av8100_conf_w_raw(enum av8100_command_type command_type,
+ u8 buffer_length,
+ u8 *buffer,
+ u8 *return_buffer_length,
+ u8 *return_buffer)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ int cnt;
+ int cnt_max;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ if (return_buffer_length)
+ *return_buffer_length = 0;
+
+ i2c = adev->config.client;
+
+ /* Write the command buffer */
+ retval = write_multi_byte(i2c,
+ AV8100_CMD_BUF_OFFSET, buffer, buffer_length);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ /* Write the command */
+ retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET,
+ command_type);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+
+ /* Get the first return byte */
+ mdelay(AV8100_WAITTIME_1MS);
+ cnt = 0;
+	cnt_max = ARRAY_SIZE(waittime_retry);
+ retval = get_command_return_first(i2c, command_type);
+ while (retval && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = get_command_return_first(i2c, command_type);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "first return cnt:%d\n", cnt);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ retval = get_command_return_data(i2c, command_type, buffer,
+ return_buffer_length, return_buffer);
+
+av8100_conf_w_raw_out:
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_conf_w_raw);
+
+struct av8100_status av8100_status_get(void)
+{
+ struct av8100_status status = {0};
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (adev)
+ return adev->status;
+ else
+ return status;
+}
+EXPORT_SYMBOL(av8100_status_get);
+
+enum av8100_output_CEA_VESA av8100_video_output_format_get(int xres,
+ int yres,
+ int htot,
+ int vtot,
+ int pixelclk,
+ bool interlaced)
+{
+ enum av8100_output_CEA_VESA index = 1;
+ int yres_div = !interlaced ? 1 : 2;
+ int hres_div = 1;
+ long freq1;
+ long freq2;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+	/*
+	 * 720_576_I needs a divider for hact and htot since these params
+	 * are twice as large as expected in av8100_all_cea, which is used
+	 * as input to the video input config.
+	 */
+ if ((xres == 720) && (yres == 576) && (interlaced == true))
+ hres_div = 2;
+
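+	/*
+	 * freq1 approximates the refresh rate in Hz from the mode parameters
+	 * (pixelclk is assumed to be the pixel clock period in ps, as in
+	 * struct fb_videomode) so that it can be compared with freq2 derived
+	 * from the av8100_all_cea table below.
+	 */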
+ freq1 = 1000000 / htot * 1000000 / vtot / pixelclk + 1;
+ while (index < sizeof(av8100_all_cea)/sizeof(struct av8100_cea)) {
+ freq2 = av8100_all_cea[index].frequence /
+ av8100_all_cea[index].htotale /
+ av8100_all_cea[index].vtotale;
+
+ dev_dbg(adev->dev, "freq1:%ld freq2:%ld\n", freq1, freq2);
+ if ((xres == av8100_all_cea[index].hactive / hres_div) &&
+ (yres == av8100_all_cea[index].vactive * yres_div) &&
+ (htot == av8100_all_cea[index].htotale / hres_div) &&
+ (vtot == av8100_all_cea[index].vtotale) &&
+ (abs(freq1 - freq2) < 2)) {
+ goto av8100_video_output_format_get_out;
+ }
+ index++;
+ }
+
+av8100_video_output_format_get_out:
+ dev_dbg(adev->dev, "av8100_video_output_format_get %d %d %d %d %d\n",
+ xres, yres, htot, vtot, index);
+ return index;
+}
+EXPORT_SYMBOL(av8100_video_output_format_get);
+
+void av8100_hdmi_event_cb_set(void (*hdmi_ev_cb)(enum av8100_hdmi_event))
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (adev)
+ adev->params.hdmi_ev_cb = hdmi_ev_cb;
+}
+EXPORT_SYMBOL(av8100_hdmi_event_cb_set);
+
+u8 av8100_ver_get(void)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ return adev->chip_version;
+}
+EXPORT_SYMBOL(av8100_ver_get);
+
+static const struct color_conversion_cmd *get_color_transform_cmd(
+ struct av8100_device *adev,
+ enum av8100_color_transform transform)
+{
+ const struct color_conversion_cmd *result;
+
+ switch (transform) {
+ case AV8100_COLOR_TRANSFORM_INDENTITY:
+ result = &col_trans_identity;
+ break;
+ case AV8100_COLOR_TRANSFORM_INDENTITY_CLAMP_YUV:
+ result = &col_trans_identity_clamp_yuv;
+ break;
+ case AV8100_COLOR_TRANSFORM_YUV_TO_RGB:
+ if (adev->chip_version == AV8100_CHIPVER_1)
+ result = &col_trans_yuv_to_rgb_v1;
+ else
+ result = &col_trans_yuv_to_rgb_v2;
+ break;
+ case AV8100_COLOR_TRANSFORM_YUV_TO_DENC:
+ result = &col_trans_yuv_to_denc;
+ break;
+ case AV8100_COLOR_TRANSFORM_RGB_TO_DENC:
+ result = &col_trans_rgb_to_denc;
+ break;
+ default:
+ dev_warn(adev->dev, "Unknown color space transform\n");
+ result = &col_trans_identity;
+ break;
+ }
+ return result;
+}
+
+static int av8100_open(struct inode *inode, struct file *filp)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int av8100_release(struct inode *inode, struct file *filp)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static long av8100_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+
+int av8100_device_register(struct av8100_device *adev)
+{
+ adev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ adev->miscdev.name = "av8100";
+ adev->miscdev.fops = &av8100_fops;
+
+ if (misc_register(&adev->miscdev)) {
+ pr_err("av8100 misc_register failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int av8100_init_device(struct av8100_device *adev, struct device *dev)
+{
+ adev->dev = dev;
+
+ if (av8100_config_init(adev)) {
+ dev_info(dev, "av8100_config_init failed\n");
+ return -EFAULT;
+ }
+
+ if (av8100_params_init(adev)) {
+ dev_info(dev, "av8100_params_init failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int __devinit av8100_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct av8100_platform_data *pdata = i2c_client->dev.platform_data;
+ struct device *dev;
+ struct av8100_device *adev;
+
+ dev = &i2c_client->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Allocate device data */
+ adev = kzalloc(sizeof(struct av8100_device), GFP_KERNEL);
+ if (!adev) {
+ dev_info(dev, "%s: Alloc failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Add to list */
+ list_add_tail(&adev->list, &av8100_device_list);
+
+ av8100_device_register(adev);
+
+ av8100_init_device(adev, dev);
+
+ av8100_set_state(adev, AV8100_OPMODE_UNDEFINED);
+
+ if (!i2c_check_functionality(i2c_client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ ret = -ENODEV;
+ dev_info(dev, "av8100 i2c_check_functionality failed\n");
+ goto err;
+ }
+
+ init_waitqueue_head(&adev->event);
+
+ adev->config.client = i2c_client;
+ adev->config.id = (struct i2c_device_id *) id;
+ i2c_set_clientdata(i2c_client, &adev->config);
+
+ kthread_run(av8100_thread, adev, "av8100_thread");
+
+ ret = request_irq(pdata->irq, av8100_intr_handler,
+ IRQF_TRIGGER_RISING, "av8100", adev);
+ if (ret) {
+ dev_err(dev, "av8100_hw request_irq %d failed %d\n",
+ pdata->irq, ret);
+ gpio_free(pdata->irq);
+ goto err;
+ }
+
+ /* Get regulator resource */
+ if (pdata->regulator_pwr_id) {
+ adev->params.regulator_pwr = regulator_get(dev,
+ pdata->regulator_pwr_id);
+ if (IS_ERR(adev->params.regulator_pwr)) {
+ ret = PTR_ERR(adev->params.regulator_pwr);
+ dev_warn(dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_pwr_id);
+ adev->params.regulator_pwr = NULL;
+ return ret;
+ }
+ }
+
+ /* Get clock resource */
+ if (pdata->inputclk_id) {
+ adev->params.inputclk = clk_get(NULL, pdata->inputclk_id);
+ if (IS_ERR(adev->params.inputclk)) {
+ adev->params.inputclk = NULL;
+ dev_warn(dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->inputclk_id);
+ }
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN);
+
+ /* Obtain the chip version */
+ if (av8100_powerup1(adev)) {
+ dev_err(adev->dev, "av8100_powerup1 fail\n");
+ return -EFAULT;
+ }
+
+ ret = av8100_powerdown();
+
+err:
+ return ret;
+}
+
+static int __devexit av8100_remove(struct i2c_client *i2c_client)
+{
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(&i2c_client->dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (adev->params.inputclk)
+ clk_put(adev->params.inputclk);
+
+ /* Release regulator resource */
+ if (adev->params.regulator_pwr)
+ regulator_put(adev->params.regulator_pwr);
+
+ misc_deregister(&adev->miscdev);
+
+ /* Remove from list */
+ list_del(&adev->list);
+
+ /* Free device data */
+ kfree(adev);
+
+ return 0;
+}
+
+int av8100_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ if (i2c_add_driver(&av8100_driver)) {
+ pr_err("av8100 i2c_add_driver failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+module_init(av8100_init);
+
+void av8100_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ i2c_del_driver(&av8100_driver);
+}
+module_exit(av8100_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson HDMI display driver");
diff --git a/drivers/video/av8100/av8100_regs.h b/drivers/video/av8100/av8100_regs.h
new file mode 100644
index 00000000000..6ed9000987a
--- /dev/null
+++ b/drivers/video/av8100/av8100_regs.h
@@ -0,0 +1,346 @@
+
+#define AV8100_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define AV8100_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
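+
+/*
+ * Example (illustrative only): AV8100_STANDBY_STBY(1) expands to
+ * AV8100_VAL2REG(AV8100_STANDBY, STBY, 1), i.e.
+ * ((1 << AV8100_STANDBY_STBY_SHIFT) & AV8100_STANDBY_STBY_MASK) == 0x02,
+ * and AV8100_STANDBY_STBY_GET() performs the inverse extraction.
+ */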
+
+#define AV8100_STANDBY 0x00000000
+#define AV8100_STANDBY_CPD_SHIFT 0
+#define AV8100_STANDBY_CPD_MASK 0x00000001
+#define AV8100_STANDBY_CPD_HIGH 1
+#define AV8100_STANDBY_CPD_LOW 0
+#define AV8100_STANDBY_CPD(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, CPD, __x)
+#define AV8100_STANDBY_CPD_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, CPD, __x)
+#define AV8100_STANDBY_STBY_SHIFT 1
+#define AV8100_STANDBY_STBY_MASK 0x00000002
+#define AV8100_STANDBY_STBY_HIGH 1
+#define AV8100_STANDBY_STBY_LOW 0
+#define AV8100_STANDBY_STBY(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, STBY, __x)
+#define AV8100_STANDBY_STBY_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, STBY, __x)
+#define AV8100_STANDBY_HPDS_SHIFT 2
+#define AV8100_STANDBY_HPDS_MASK 0x00000004
+#define AV8100_STANDBY_HPDS(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, HPDS, __x)
+#define AV8100_STANDBY_HPDS_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, HPDS, __x)
+#define AV8100_STANDBY_CPDS_SHIFT 3
+#define AV8100_STANDBY_CPDS_MASK 0x00000008
+#define AV8100_STANDBY_CPDS(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, CPDS, __x)
+#define AV8100_STANDBY_CPDS_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, CPDS, __x)
+#define AV8100_STANDBY_MCLKRNG_SHIFT 4
+#define AV8100_STANDBY_MCLKRNG_MASK 0x000000F0
+#define AV8100_STANDBY_MCLKRNG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, MCLKRNG, __x)
+#define AV8100_STANDBY_MCLKRNG_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, MCLKRNG, __x)
+#define AV8100_HDMI_5_VOLT_TIME 0x00000001
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_SHIFT 0
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_MASK 0x0000001F
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_SHIFT 0
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_MASK 0x00000003
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_SHIFT 2
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_MASK 0x0000001C
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_SHIFT 5
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_MASK 0x000000E0
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK 0x00000002
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_SHIFT 0
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_MASK 0x00000001
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_SHIFT 1
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_MASK 0x00000002
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_SHIFT 2
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_MASK 0x0000000C
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT 0x00
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_ALT 0x01
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT0 0x02
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT1 0x03
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_SHIFT 7
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_MASK 0x00000080
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT 0x00000003
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_SHIFT 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_MASK 0x00000001
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_SHIFT 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_MASK 0x00000002
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_SHIFT 2
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_MASK 0x00000004
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_SHIFT 4
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_MASK 0x000000F0
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_SHIFT 6
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_MASK 0x00000040
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, BPDIG, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_SHIFT 0
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_MASK 0x00000001
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_SHIFT 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_MASK 0x00000002
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_SHIFT 2
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_SHIFT 3
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_MASK 0x00000008
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_SHIFT 4
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_MASK 0x00000010
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_SHIFT 5
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_MASK 0x00000020
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_SHIFT 6
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_MASK 0x00000040
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x)
+#define AV8100_GENERAL_INTERRUPT 0x00000005
+#define AV8100_GENERAL_INTERRUPT_EOCI_SHIFT 0
+#define AV8100_GENERAL_INTERRUPT_EOCI_MASK 0x00000001
+#define AV8100_GENERAL_INTERRUPT_EOCI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, EOCI, __x)
+#define AV8100_GENERAL_INTERRUPT_EOCI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, EOCI, __x)
+#define AV8100_GENERAL_INTERRUPT_VSII_SHIFT 1
+#define AV8100_GENERAL_INTERRUPT_VSII_MASK 0x00000002
+#define AV8100_GENERAL_INTERRUPT_VSII(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSII, __x)
+#define AV8100_GENERAL_INTERRUPT_VSII_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSII, __x)
+#define AV8100_GENERAL_INTERRUPT_VSOI_SHIFT 2
+#define AV8100_GENERAL_INTERRUPT_VSOI_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_VSOI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSOI, __x)
+#define AV8100_GENERAL_INTERRUPT_VSOI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSOI, __x)
+#define AV8100_GENERAL_INTERRUPT_CECI_SHIFT 3
+#define AV8100_GENERAL_INTERRUPT_CECI_MASK 0x00000008
+#define AV8100_GENERAL_INTERRUPT_CECI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, CECI, __x)
+#define AV8100_GENERAL_INTERRUPT_CECI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, CECI, __x)
+#define AV8100_GENERAL_INTERRUPT_HDCPI_SHIFT 4
+#define AV8100_GENERAL_INTERRUPT_HDCPI_MASK 0x00000010
+#define AV8100_GENERAL_INTERRUPT_HDCPI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, HDCPI, __x)
+#define AV8100_GENERAL_INTERRUPT_HDCPI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, HDCPI, __x)
+#define AV8100_GENERAL_INTERRUPT_UOVBI_SHIFT 5
+#define AV8100_GENERAL_INTERRUPT_UOVBI_MASK 0x00000020
+#define AV8100_GENERAL_INTERRUPT_UOVBI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, UOVBI, __x)
+#define AV8100_GENERAL_INTERRUPT_UOVBI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, UOVBI, __x)
+#define AV8100_GENERAL_INTERRUPT_TEI_SHIFT 6
+#define AV8100_GENERAL_INTERRUPT_TEI_MASK 0x00000040
+#define AV8100_GENERAL_INTERRUPT_TEI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, TEI, __x)
+#define AV8100_GENERAL_INTERRUPT_TEI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, TEI, __x)
+#define AV8100_GENERAL_STATUS 0x00000006
+#define AV8100_GENERAL_STATUS_CECTXERR_SHIFT 0
+#define AV8100_GENERAL_STATUS_CECTXERR_MASK 0x00000001
+#define AV8100_GENERAL_STATUS_CECTXERR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTXERR, __x)
+#define AV8100_GENERAL_STATUS_CECREC_SHIFT 1
+#define AV8100_GENERAL_STATUS_CECREC_MASK 0x00000002
+#define AV8100_GENERAL_STATUS_CECREC_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECREC, __x)
+#define AV8100_GENERAL_STATUS_CECTRX_SHIFT 2
+#define AV8100_GENERAL_STATUS_CECTRX_MASK 0x00000004
+#define AV8100_GENERAL_STATUS_CECTRX_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTRX, __x)
+#define AV8100_GENERAL_STATUS_UC_SHIFT 3
+#define AV8100_GENERAL_STATUS_UC_MASK 0x00000008
+#define AV8100_GENERAL_STATUS_UC_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, UC, __x)
+#define AV8100_GENERAL_STATUS_ONUVB_SHIFT 4
+#define AV8100_GENERAL_STATUS_ONUVB_MASK 0x00000010
+#define AV8100_GENERAL_STATUS_ONUVB_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, ONUVB, __x)
+#define AV8100_GENERAL_STATUS_HDCPS_SHIFT 5
+#define AV8100_GENERAL_STATUS_HDCPS_MASK 0x000000E0
+#define AV8100_GENERAL_STATUS_HDCPS_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, HDCPS, __x)
+#define AV8100_GPIO_CONFIGURATION 0x00000007
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_SHIFT 0
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_MASK 0x00000001
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_SHIFT 1
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_MASK 0x00000002
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_SHIFT 2
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_MASK 0x00000004
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_SHIFT 3
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_MASK 0x00000008
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_SHIFT 4
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_MASK 0x00000010
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_SHIFT 5
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_MASK 0x00000020
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_UCDBG_SHIFT 6
+#define AV8100_GPIO_CONFIGURATION_UCDBG_MASK 0x00000040
+#define AV8100_GPIO_CONFIGURATION_UCDBG(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, UCDBG, __x)
+#define AV8100_GPIO_CONFIGURATION_UCDBG_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, UCDBG, __x)
+#define AV8100_GENERAL_CONTROL 0x00000008
+#define AV8100_GENERAL_CONTROL_FDL_SHIFT 4
+#define AV8100_GENERAL_CONTROL_FDL_MASK 0x00000010
+#define AV8100_GENERAL_CONTROL_FDL_HIGH 1
+#define AV8100_GENERAL_CONTROL_FDL_LOW 0
+#define AV8100_GENERAL_CONTROL_FDL(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, FDL, __x)
+#define AV8100_GENERAL_CONTROL_FDL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, FDL, __x)
+#define AV8100_GENERAL_CONTROL_HLD_SHIFT 5
+#define AV8100_GENERAL_CONTROL_HLD_MASK 0x00000020
+#define AV8100_GENERAL_CONTROL_HLD_HIGH 1
+#define AV8100_GENERAL_CONTROL_HLD_LOW 0
+#define AV8100_GENERAL_CONTROL_HLD(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, HLD, __x)
+#define AV8100_GENERAL_CONTROL_HLD_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, HLD, __x)
+#define AV8100_GENERAL_CONTROL_WA_SHIFT 6
+#define AV8100_GENERAL_CONTROL_WA_MASK 0x00000040
+#define AV8100_GENERAL_CONTROL_WA_HIGH 1
+#define AV8100_GENERAL_CONTROL_WA_LOW 0
+#define AV8100_GENERAL_CONTROL_WA(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, WA, __x)
+#define AV8100_GENERAL_CONTROL_WA_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, WA, __x)
+#define AV8100_GENERAL_CONTROL_RA_SHIFT 7
+#define AV8100_GENERAL_CONTROL_RA_MASK 0x00000080
+#define AV8100_GENERAL_CONTROL_RA_HIGH 1
+#define AV8100_GENERAL_CONTROL_RA_LOW 0
+#define AV8100_GENERAL_CONTROL_RA(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, RA, __x)
+#define AV8100_GENERAL_CONTROL_RA_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, RA, __x)
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY 0x0000000F
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_SHIFT 0
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_MASK 0x000000FF
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(__x) \
+ AV8100_VAL2REG(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x)
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(__x) \
+ AV8100_REG2VAL(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x)
diff --git a/drivers/video/av8100/hdmi.c b/drivers/video/av8100/hdmi.c
new file mode 100644
index 00000000000..3159c4446f1
--- /dev/null
+++ b/drivers/video/av8100/hdmi.c
@@ -0,0 +1,2479 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson HDMI driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+#include "hdmi_loc.h"
+
+#define SYSFS_EVENT_FILENAME "evread"
+#define HDMI_DEVNR_DEFAULT 0
+
+static DEFINE_MUTEX(hdmi_events_mutex);
+#define LOCK_HDMI_EVENTS mutex_lock(&hdmi_events_mutex)
+#define UNLOCK_HDMI_EVENTS mutex_unlock(&hdmi_events_mutex)
+#define EVENTS_MASK 0xFF
+
+struct hdmi_device {
+ struct list_head list;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct hdmi_sysfs_data sysfs_data;
+ int events;
+ int events_mask;
+ wait_queue_head_t event_wq;
+ bool events_received;
+ int devnr;
+};
+
+/* List of devices */
+static LIST_HEAD(hdmi_device_list);
+
+static ssize_t store_storeastext(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_plugdeten(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_edidread(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_edidread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_ceceven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_cecread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_cecsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_infofrsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_hdcpeven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpchkaesotp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_hdcpstateget(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_evread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_evclr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_audiocfg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_plugstatus(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_poweronoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_poweronoff(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_evwakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static const struct device_attribute hdmi_sysfs_attrs[] = {
+ __ATTR(storeastext, S_IWUSR, NULL, store_storeastext),
+ __ATTR(plugdeten, S_IWUSR, NULL, store_plugdeten),
+ __ATTR(edidread, S_IRUGO | S_IWUSR, show_edidread, store_edidread),
+ __ATTR(ceceven, S_IWUSR, NULL, store_ceceven),
+ __ATTR(cecread, S_IRUGO, show_cecread, NULL),
+ __ATTR(cecsend, S_IWUSR, NULL, store_cecsend),
+ __ATTR(infofrsend, S_IWUSR, NULL, store_infofrsend),
+ __ATTR(hdcpeven, S_IWUSR, NULL, store_hdcpeven),
+ __ATTR(hdcpchkaesotp, S_IRUGO, show_hdcpchkaesotp, NULL),
+ __ATTR(hdcpfuseaes, S_IRUGO | S_IWUSR, show_hdcpfuseaes,
+ store_hdcpfuseaes),
+ __ATTR(hdcploadaes, S_IRUGO | S_IWUSR, show_hdcploadaes,
+ store_hdcploadaes),
+ __ATTR(hdcpauthencr, S_IRUGO | S_IWUSR, show_hdcpauthencr,
+ store_hdcpauthencr),
+ __ATTR(hdcpstateget, S_IRUGO, show_hdcpstateget, NULL),
+ __ATTR(evread, S_IRUGO, show_evread, NULL),
+ __ATTR(evclr, S_IWUSR, NULL, store_evclr),
+ __ATTR(audiocfg, S_IWUSR, NULL, store_audiocfg),
+ __ATTR(plugstatus, S_IRUGO, show_plugstatus, NULL),
+ __ATTR(poweronoff, S_IRUGO | S_IWUSR, show_poweronoff,
+ store_poweronoff),
+ __ATTR(evwakeup, S_IWUSR, NULL, store_evwakeup),
+ __ATTR_NULL
+};
+
+/* Convert a two-character hex string to its integer value */
+static unsigned int htoi(const char *ptr)
+{
+ unsigned int value = 0;
+ char ch;
+
+ if (!ptr)
+ return 0;
+
+ ch = *ptr;
+ if (isdigit(ch))
+ value = ch - '0';
+ else
+ value = toupper(ch) - 'A' + 10;
+
+ value <<= 4;
+ ch = *(++ptr);
+
+ if (isdigit(ch))
+ value += ch - '0';
+ else
+ value += toupper(ch) - 'A' + 10;
+
+ return value;
+}
+
+static struct hdmi_device *dev_to_hdev(struct device *dev)
+{
+	/* Get device from list of devices */
+	struct list_head *element;
+	struct hdmi_device *hdmi_dev;
+
+	list_for_each(element, &hdmi_device_list) {
+		hdmi_dev = list_entry(element, struct hdmi_device, list);
+		if (hdmi_dev->dev == dev)
+			return hdmi_dev;
+	}
+
+	return NULL;
+}
+
+static struct hdmi_device *devnr_to_hdev(int devnr)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct hdmi_device *hdmi_dev;
+ int cnt = 0;
+
+ list_for_each(element, &hdmi_device_list) {
+ hdmi_dev = list_entry(element, struct hdmi_device, list);
+ if (cnt == devnr)
+ return hdmi_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
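+/*
+ * Update the event mask for the given event. If the event is already
+ * pending, user space is notified through sysfs and any poll() waiters
+ * are woken up.
+ */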
+static int event_enable(struct hdmi_device *hdev, bool enable,
+ enum hdmi_event ev)
+{
+ struct kobject *kobj = &hdev->dev->kobj;
+
+ dev_dbg(hdev->dev, "enable_event %d %02x\n", enable, ev);
+ if (enable)
+ hdev->events_mask |= ev;
+ else
+ hdev->events_mask &= ~ev;
+
+ if (hdev->events & ev) {
+ /* Report pending event */
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ wake_up_interruptible(&hdev->event_wq);
+ }
+
+ return 0;
+}
+
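+/*
+ * Enable or disable plug detect events and program the HDMI 5V on/off
+ * times in the AV8100, preserving the current DENC off time.
+ */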
+static int plugdeten(struct hdmi_device *hdev, struct plug_detect *pldet)
+{
+ struct av8100_status status;
+ u8 denc_off_time = 0;
+ int retval;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ event_enable(hdev, pldet->hdmi_detect_enable != 0,
+ HDMI_EVENT_HDMI_PLUGIN);
+ event_enable(hdev, pldet->hdmi_detect_enable != 0,
+ HDMI_EVENT_HDMI_PLUGOUT);
+
+ av8100_reg_hdmi_5_volt_time_r(&denc_off_time, NULL, NULL);
+
+ retval = av8100_reg_hdmi_5_volt_time_w(
+ denc_off_time,
+ pldet->hdmi_off_time,
+ pldet->on_time);
+
+ if (retval) {
+ dev_err(hdev->dev, "Failed to write the value to av8100 "
+ "register\n");
+ return -EFAULT;
+ }
+
+ return retval;
+}
+
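+/*
+ * Read one EDID block from the connected sink. The block length and data
+ * are returned through len and data.
+ */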
+static int edidread(struct hdmi_device *hdev, struct edid_read *edidread,
+ u8 *len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.edid_section_readback_format.address = edidread->address;
+ config.edid_section_readback_format.block_number = edidread->block_nr;
+
+ dev_dbg(hdev->dev, "addr:%0x blnr:%0x",
+ config.edid_section_readback_format.address,
+ config.edid_section_readback_format.block_number);
+
+ if (av8100_conf_prep(AV8100_COMMAND_EDID_SECTION_READBACK,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_EDID_SECTION_READBACK,
+ len, data, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(hdev->dev, "len:%0x\n", *len);
+
+ return 0;
+}
+
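+/*
+ * Read back a received CEC message. The first byte of the returned buffer
+ * packs the source (high nibble) and destination (low nibble) addresses;
+ * the remaining bytes are the payload.
+ */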
+static int cecread(struct hdmi_device *hdev, u8 *src, u8 *dest, u8 *data_len,
+ u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buff[HDMI_CEC_READ_MAXSIZE];
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ &buf_len, buff, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len > 0) {
+ *src = (buff[0] & 0xF0) >> 4;
+ *dest = buff[0] & 0x0F;
+ *data_len = buf_len - 1;
+ memcpy(data, &buff[1], buf_len - 1);
+ } else
+ *data_len = 0;
+
+ return 0;
+}
+
+/* CEC tx status can be set or read */
+static bool cec_tx_status(struct hdmi_device *hdev,
+ enum cec_tx_status_action action)
+{
+ static bool cec_tx_busy;
+
+ switch (action) {
+ case CEC_TX_SET_FREE:
+ cec_tx_busy = false;
+ dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy);
+ break;
+
+ case CEC_TX_SET_BUSY:
+ cec_tx_busy = true;
+ dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy);
+ break;
+
+ case CEC_TX_CHECK:
+ default:
+ dev_dbg(hdev->dev, "cec_tx_busy chk:%d\n", cec_tx_busy);
+ break;
+ }
+
+ return cec_tx_busy;
+}
+
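+/*
+ * Send a CEC message. Waits (with a timeout) for any previous transmission
+ * to finish before writing, then marks the CEC transmitter as busy.
+ */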
+static int cecsend(struct hdmi_device *hdev, u8 src, u8 dest, u8 data_len,
+ u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ int cnt;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.cec_message_write_format.buffer[0] = ((src & 0x0F) << 4) +
+ (dest & 0x0F);
+ config.cec_message_write_format.buffer_length = data_len + 1;
+ memcpy(&config.cec_message_write_format.buffer[1], data, data_len);
+
+ if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_enable_interrupt() != 0) {
+ dev_err(hdev->dev, "av8100_ei FAIL\n");
+ return -EINVAL;
+ }
+
+ cnt = 0;
+ while ((cnt < CECTX_TRY) && cec_tx_status(hdev, CEC_TX_CHECK)) {
+ /* Wait for pending CEC to be finished */
+ msleep(CECTX_WAITTIME);
+ cnt++;
+ }
+ dev_dbg(hdev->dev, "cectxcnt:%d\n", cnt);
+
+ if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+ cec_tx_status(hdev, CEC_TX_SET_BUSY);
+
+ return 0;
+}
+
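+/* Send an HDMI infoframe (type, version, CRC and payload) via the AV8100. */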
+static int infofrsend(struct hdmi_device *hdev, u8 type, u8 version, u8 crc,
+ u8 data_len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((data_len < 1) || (data_len > HDMI_INFOFRAME_MAX_SIZE))
+ return -EINVAL;
+
+ config.infoframes_format.type = type;
+ config.infoframes_format.version = version;
+ config.infoframes_format.crc = crc;
+ config.infoframes_format.length = data_len;
+ memcpy(&config.infoframes_format.data, data, data_len);
+ if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
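+/*
+ * Read the AES key fuse status from OTP: *crc receives the key CRC and
+ * *progged is set non-zero if a key has been fused.
+ */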
+static int hdcpchkaesotp(struct hdmi_device *hdev, u8 *crc, u8 *progged)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[2];
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_READ;
+ memset(config.fuse_aes_key_format.key, 0, AV8100_FUSE_KEY_SIZE);
+ if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len == 2) {
+ *crc = buf[0];
+ *progged = buf[1];
+ }
+
+ return 0;
+}
+
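+/*
+ * Fuse the AES key into OTP and verify the CRC reported by the chip.
+ * *result is set to OK, CRC mismatch or not OK.
+ */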
+static int hdcpfuseaes(struct hdmi_device *hdev, u8 *key, u8 crc, u8 *result)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[2];
+
+ /* Default not OK */
+ *result = HDMI_RESULT_NOT_OK;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_WRITE;
+ memcpy(config.fuse_aes_key_format.key, key, AV8100_FUSE_KEY_SIZE);
+ if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len == 2) {
+ dev_dbg(hdev->dev, "buf[0]:%02x buf[1]:%02x\n", buf[0], buf[1]);
+ if ((crc == buf[0]) && (buf[1] == 1))
+ /* OK */
+ *result = HDMI_RESULT_OK;
+ else
+ *result = HDMI_RESULT_CRC_MISMATCH;
+ }
+
+ return 0;
+}
+
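+/*
+ * Load one HDCP key block (AES block or KSV) into the AV8100. When crc32
+ * is non-NULL the CRC32 returned by the chip is copied back to it.
+ */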
+static int hdcploadaes(struct hdmi_device *hdev, u8 block, u8 key_len, u8 *key,
+ u8 *result, u8 *crc32)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[CRC32_SIZE];
+
+ /* Default not OK */
+ *result = HDMI_RESULT_NOT_OK;
+
+ dev_dbg(hdev->dev, "%s block:%d\n", __func__, block);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.hdcp_send_key_format.key_number = block;
+ config.hdcp_send_key_format.data_len = key_len;
+ memcpy(config.hdcp_send_key_format.data, key, key_len);
+ if (av8100_conf_prep(AV8100_COMMAND_HDCP_SENDKEY, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_HDCP_SENDKEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if ((buf_len == CRC32_SIZE) && (crc32)) {
+ memcpy(crc32, buf, CRC32_SIZE);
+ dev_dbg(hdev->dev, "crc32:%02x%02x%02x%02x\n",
+ crc32[0], crc32[1], crc32[2], crc32[3]);
+ }
+
+ *result = HDMI_RESULT_OK;
+
+ return 0;
+}
+
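+/*
+ * Request an HDCP authentication state change (off, start, revocation
+ * list request or continue) with OESS or EESS encryption. The command
+ * response is returned through len and data.
+ */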
+static int hdcpauthencr(struct hdmi_device *hdev, u8 auth_type, u8 encr_type,
+ u8 *len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ switch (auth_type) {
+ case HDMI_HDCP_AUTH_OFF:
+ default:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_REQ_OFF;
+ break;
+
+ case HDMI_HDCP_AUTH_START:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_REQ_ON;
+ break;
+
+ case HDMI_HDCP_AUTH_REV_LIST_REQ:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_REV_LIST_REQ;
+ break;
+ case HDMI_HDCP_AUTH_CONT:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_CONT;
+ break;
+ }
+
+ switch (encr_type) {
+ case HDMI_HDCP_ENCR_OESS:
+ default:
+ config.hdcp_management_format.encr_use =
+ AV8100_HDCP_ENCR_USE_OESS;
+ break;
+
+ case HDMI_HDCP_ENCR_EESS:
+ config.hdcp_management_format.encr_use =
+ AV8100_HDCP_ENCR_USE_EESS;
+ break;
+ }
+
+ if (av8100_conf_prep(AV8100_COMMAND_HDCP_MANAGEMENT,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_HDCP_MANAGEMENT,
+ len, data, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 events_read(struct hdmi_device *hdev)
+{
+ int ret;
+
+ LOCK_HDMI_EVENTS;
+ ret = hdev->events;
+ dev_dbg(hdev->dev, "%s %02x\n", __func__, hdev->events);
+ UNLOCK_HDMI_EVENTS;
+
+ return ret;
+}
+
+static int events_clear(struct hdmi_device *hdev, u8 ev)
+{
+ dev_dbg(hdev->dev, "%s %02x\n", __func__, ev);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events &= ~ev & EVENTS_MASK;
+ UNLOCK_HDMI_EVENTS;
+
+ return 0;
+}
+
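+/* Inject a wakeup event and wake up any user space poll() waiters. */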
+static int event_wakeup(struct hdmi_device *hdev)
+{
+ struct kobject *kobj = &hdev->dev->kobj;
+
+ dev_dbg(hdev->dev, "%s", __func__);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events |= HDMI_EVENT_WAKEUP;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+ wake_up_interruptible(&hdev->event_wq);
+
+ return 0;
+}
+
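+/*
+ * Configure the AV8100 audio input: interface format, number of I2S
+ * inputs, sample frequency, word length, audio format, interface mode
+ * and mute.
+ */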
+static int audiocfg(struct hdmi_device *hdev, struct audio_cfg *cfg)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.audio_input_format.audio_input_if_format = cfg->if_format;
+ config.audio_input_format.i2s_input_nb = cfg->i2s_entries;
+ config.audio_input_format.sample_audio_freq = cfg->freq;
+ config.audio_input_format.audio_word_lg = cfg->word_length;
+ config.audio_input_format.audio_format = cfg->format;
+ config.audio_input_format.audio_if_mode = cfg->if_mode;
+ config.audio_input_format.audio_mute = cfg->mute;
+
+ if (av8100_conf_prep(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* sysfs */
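+/*
+ * The attributes below accept and return their payload either as raw
+ * binary bytes or as two hex text characters per byte, depending on the
+ * mode selected through the storeastext attribute.
+ */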
+static ssize_t store_storeastext(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if ((count != HDMI_STOREASTEXT_BIN_SIZE) &&
+ (count != HDMI_STOREASTEXT_TEXT_SIZE) &&
+ (count != HDMI_STOREASTEXT_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ if ((count == HDMI_STOREASTEXT_BIN_SIZE) && (*buf == 0x1))
+ hdev->sysfs_data.store_as_hextext = true;
+ else if (((count == HDMI_STOREASTEXT_TEXT_SIZE) ||
+ (count == HDMI_STOREASTEXT_TEXT_SIZE + 1)) && (*buf == '0') &&
+ (*(buf + 1) == '1')) {
+ hdev->sysfs_data.store_as_hextext = true;
+ } else {
+ hdev->sysfs_data.store_as_hextext = false;
+ }
+
+ dev_dbg(hdev->dev, "store_as_hextext:%0d\n",
+ hdev->sysfs_data.store_as_hextext);
+
+ return count;
+}
+
+static ssize_t store_plugdeten(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct plug_detect plug_detect;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_PLUGDETEN_TEXT_SIZE) &&
+ (count != HDMI_PLUGDETEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ plug_detect.hdmi_detect_enable = htoi(buf + index);
+ index += 2;
+ plug_detect.on_time = htoi(buf + index);
+ index += 2;
+ plug_detect.hdmi_off_time = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_PLUGDETEN_BIN_SIZE)
+ return -EINVAL;
+ plug_detect.hdmi_detect_enable = *(buf + index++);
+ plug_detect.on_time = *(buf + index++);
+ plug_detect.hdmi_off_time = *(buf + index++);
+ }
+
+ if (plugdeten(hdev, &plug_detect))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_edidread(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct edid_read edid_read;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+	dev_dbg(hdev->dev, "count:%zu\n", count);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_EDIDREAD_TEXT_SIZE) &&
+ (count != HDMI_EDIDREAD_TEXT_SIZE + 1))
+ return -EINVAL;
+ edid_read.address = htoi(buf + index);
+ index += 2;
+ edid_read.block_nr = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_EDIDREAD_BIN_SIZE)
+ return -EINVAL;
+ edid_read.address = *(buf + index++);
+ edid_read.block_nr = *(buf + index++);
+ }
+
+ if (edidread(hdev, &edid_read, &hdev->sysfs_data.edid_data.buf_len,
+ hdev->sysfs_data.edid_data.buf))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t show_edidread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int len;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ len = hdev->sysfs_data.edid_data.buf_len;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", len);
+ index += 2;
+ } else
+ *(buf + index++) = len;
+
+ dev_dbg(hdev->dev, "len:%02x\n", len);
+
+ cnt = 0;
+ while (cnt < len) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.edid_data.buf[cnt]);
+ index += 2;
+ } else
+ *(buf + index++) =
+ hdev->sysfs_data.edid_data.buf[cnt];
+
+ dev_dbg(hdev->dev, "%02x ",
+ hdev->sysfs_data.edid_data.buf[cnt]);
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_ceceven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_CECEVEN_TEXT_SIZE) &&
+ (count != HDMI_CECEVEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_CECEVEN_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ event_enable(hdev, enable, HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR |
+ HDMI_EVENT_CECTX);
+
+ return count;
+}
+
+static ssize_t show_cecread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct cec_rw cec_read;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (cecread(hdev, &cec_read.src, &cec_read.dest, &cec_read.length,
+ cec_read.data))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", cec_read.src);
+ index += 2;
+ snprintf(buf + index, 3, "%02x", cec_read.dest);
+ index += 2;
+ snprintf(buf + index, 3, "%02x", cec_read.length);
+ index += 2;
+ } else {
+ *(buf + index++) = cec_read.src;
+ *(buf + index++) = cec_read.dest;
+ *(buf + index++) = cec_read.length;
+ }
+
+ dev_dbg(hdev->dev, "len:%02x\n", cec_read.length);
+
+ cnt = 0;
+ while (cnt < cec_read.length) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", cec_read.data[cnt]);
+ index += 2;
+ } else
+ *(buf + index++) = cec_read.data[cnt];
+
+ dev_dbg(hdev->dev, "%02x ", cec_read.data[cnt]);
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_cecsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct cec_rw cec_w;
+ int index = 0;
+ int cnt;
+ int store_as_text;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if ((*buf == 'F') || (*buf == 'f'))
+		/* Allow overriding binary format for test purposes */
+ store_as_text = 1;
+ else
+ store_as_text = hdev->sysfs_data.store_as_hextext;
+
+ if (store_as_text) {
+ if ((count < HDMI_CECSEND_TEXT_SIZE_MIN) ||
+ (count > HDMI_CECSEND_TEXT_SIZE_MAX))
+ return -EINVAL;
+
+ cec_w.src = htoi(buf + index) & 0x0F;
+ index += 2;
+ cec_w.dest = htoi(buf + index);
+ index += 2;
+ cec_w.length = htoi(buf + index);
+ index += 2;
+ if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE)
+ return -EINVAL;
+ cnt = 0;
+ while (cnt < cec_w.length) {
+ cec_w.data[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", cec_w.data[cnt]);
+ cnt++;
+ }
+ } else {
+ if ((count < HDMI_CECSEND_BIN_SIZE_MIN) ||
+ (count > HDMI_CECSEND_BIN_SIZE_MAX))
+ return -EINVAL;
+
+ cec_w.src = *(buf + index++);
+ cec_w.dest = *(buf + index++);
+ cec_w.length = *(buf + index++);
+ if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE)
+ return -EINVAL;
+ memcpy(cec_w.data, buf + index, cec_w.length);
+ }
+
+ if (cecsend(hdev, cec_w.src, cec_w.dest, cec_w.length, cec_w.data))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_infofrsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct info_fr info_fr;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count < HDMI_INFOFRSEND_TEXT_SIZE_MIN) ||
+ (count > HDMI_INFOFRSEND_TEXT_SIZE_MAX))
+ return -EINVAL;
+
+ info_fr.type = htoi(&buf[index]);
+ index += 2;
+ info_fr.ver = htoi(&buf[index]);
+ index += 2;
+ info_fr.crc = htoi(&buf[index]);
+ index += 2;
+ info_fr.length = htoi(&buf[index]);
+ index += 2;
+
+ if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE)
+ return -EINVAL;
+ cnt = 0;
+ while (cnt < info_fr.length) {
+ info_fr.data[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", info_fr.data[cnt]);
+ cnt++;
+ }
+ } else {
+ if ((count < HDMI_INFOFRSEND_BIN_SIZE_MIN) ||
+ (count > HDMI_INFOFRSEND_BIN_SIZE_MAX))
+ return -EINVAL;
+
+ info_fr.type = *(buf + index++);
+ info_fr.ver = *(buf + index++);
+ info_fr.crc = *(buf + index++);
+ info_fr.length = *(buf + index++);
+
+ if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE)
+ return -EINVAL;
+ memcpy(info_fr.data, buf + index, info_fr.length);
+ }
+
+ if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc,
+ info_fr.length, info_fr.data))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_hdcpeven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCPEVEN_TEXT_SIZE) &&
+ (count != HDMI_HDCPEVEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_HDCPEVEN_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ event_enable(hdev, enable, HDMI_EVENT_HDCP);
+
+ return count;
+}
+
+static ssize_t show_hdcpchkaesotp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 crc;
+ u8 progged;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdcpchkaesotp(hdev, &crc, &progged))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", progged);
+ index += 2;
+ } else {
+ *(buf + index++) = progged;
+ }
+
+ dev_dbg(hdev->dev, "progged:%02x\n", progged);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_fuseaes hdcp_fuseaes;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default not OK */
+ hdev->sysfs_data.fuse_result = HDMI_RESULT_NOT_OK;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCP_FUSEAES_TEXT_SIZE) &&
+ (count != HDMI_HDCP_FUSEAES_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ cnt = 0;
+ while (cnt < HDMI_HDCP_FUSEAES_KEYSIZE) {
+ hdcp_fuseaes.key[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.key[cnt]);
+ cnt++;
+ }
+ hdcp_fuseaes.crc = htoi(&buf[index]);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.crc);
+ } else {
+ if (count != HDMI_HDCP_FUSEAES_BIN_SIZE)
+ return -EINVAL;
+
+ memcpy(hdcp_fuseaes.key, buf + index,
+ HDMI_HDCP_FUSEAES_KEYSIZE);
+ index += HDMI_HDCP_FUSEAES_KEYSIZE;
+ hdcp_fuseaes.crc = *(buf + index++);
+ }
+
+ if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc,
+ &hdcp_fuseaes.result))
+ return -EINVAL;
+
+ dev_dbg(hdev->dev, "fuseresult:%02x ", hdcp_fuseaes.result);
+
+ hdev->sysfs_data.fuse_result = hdcp_fuseaes.result;
+
+ return count;
+}
+
+static ssize_t show_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", hdev->sysfs_data.fuse_result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.fuse_result;
+
+ dev_dbg(hdev->dev, "status:%02x\n", hdev->sysfs_data.fuse_result);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_loadaesone hdcp_loadaes;
+ int index = 0;
+ int block_cnt;
+ int cnt;
+ u8 crc32_rcvd[CRC32_SIZE];
+ u8 crc;
+ u8 progged;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default not OK */
+ hdev->sysfs_data.loadaes_result = HDMI_RESULT_NOT_OK;
+
+ if (hdcpchkaesotp(hdev, &crc, &progged))
+ return -EINVAL;
+
+ if (!progged) {
+ /* AES is not fused */
+ hdcp_loadaes.result = HDMI_AES_NOT_FUSED;
+ goto store_hdcploadaes_err;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCP_LOADAES_TEXT_SIZE) &&
+ (count != HDMI_HDCP_LOADAES_TEXT_SIZE + 1)) {
+ dev_err(hdev->dev, "%s", "count mismatch\n");
+ return -EINVAL;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ cnt = 0;
+ while (cnt < HDMI_HDCP_AES_KEYSIZE) {
+ hdcp_loadaes.key[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ",
+ hdcp_loadaes.key[cnt]);
+ cnt++;
+ }
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ crc32_rcvd)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err aes block",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ cnt = HDMI_HDCP_AES_KSVZEROESSIZE;
+ while (cnt < HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE) {
+ hdcp_loadaes.key[cnt] =
+ htoi(&buf[index]);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_loadaes.key[cnt]);
+ cnt++;
+ }
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ NULL)) {
+ dev_err(hdev->dev,
+				"%s %d\n", "hdcploadaes err in ksv",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ /* CRC32 */
+ for (cnt = 0; cnt < CRC32_SIZE; cnt++) {
+ hdcp_loadaes.crc32[cnt] = htoi(buf + index);
+ index += 2;
+ }
+
+ if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaes.crc32[0],
+ hdcp_loadaes.crc32[1],
+ hdcp_loadaes.crc32[2],
+ hdcp_loadaes.crc32[3]);
+ hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH;
+ goto store_hdcploadaes_err;
+ }
+ } else {
+ if (count != HDMI_HDCP_LOADAES_BIN_SIZE) {
+ dev_err(hdev->dev, "%s", "count mismatch\n");
+ return -EINVAL;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ memcpy(hdcp_loadaes.key, buf + index,
+ HDMI_HDCP_AES_KEYSIZE);
+ index += HDMI_HDCP_AES_KEYSIZE;
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ crc32_rcvd)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err aes block",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ memcpy(hdcp_loadaes.key + HDMI_HDCP_AES_KSVZEROESSIZE,
+ buf + index,
+ HDMI_HDCP_AES_KSVSIZE);
+ index += HDMI_HDCP_AES_KSVSIZE;
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ NULL)) {
+ dev_err(hdev->dev, "%s %d\n",
+					"hdcploadaes err in ksv",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ memcpy(hdcp_loadaes.crc32, buf + index, CRC32_SIZE);
+ index += CRC32_SIZE;
+
+ /* CRC32 */
+ if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaes.crc32[0],
+ hdcp_loadaes.crc32[1],
+ hdcp_loadaes.crc32[2],
+ hdcp_loadaes.crc32[3]);
+ hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH;
+ }
+ }
+
+store_hdcploadaes_err:
+ hdev->sysfs_data.loadaes_result = hdcp_loadaes.result;
+ return count;
+}
+
+static ssize_t show_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.loadaes_result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.loadaes_result;
+
+ dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.loadaes_result);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_authencr hdcp_authencr;
+ int index = 0;
+ u8 crc;
+ u8 progged;
+ int result = HDMI_RESULT_NOT_OK;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default */
+ hdev->sysfs_data.authencr.buf_len = 0;
+
+ if (hdcpchkaesotp(hdev, &crc, &progged)) {
+ result = HDMI_AES_NOT_FUSED;
+ goto store_hdcpauthencr_end;
+ }
+
+ if (!progged) {
+ /* AES is not fused */
+ result = HDMI_AES_NOT_FUSED;
+ goto store_hdcpauthencr_end;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCPAUTHENCR_TEXT_SIZE) &&
+ (count != HDMI_HDCPAUTHENCR_TEXT_SIZE + 1))
+ goto store_hdcpauthencr_end;
+
+ hdcp_authencr.auth_type = htoi(buf + index);
+ index += 2;
+ hdcp_authencr.encr_type = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_HDCPAUTHENCR_BIN_SIZE)
+ goto store_hdcpauthencr_end;
+
+ hdcp_authencr.auth_type = *(buf + index++);
+ hdcp_authencr.encr_type = *(buf + index++);
+ }
+
+ if (hdcpauthencr(hdev, hdcp_authencr.auth_type, hdcp_authencr.encr_type,
+ &hdev->sysfs_data.authencr.buf_len,
+ hdev->sysfs_data.authencr.buf))
+ goto store_hdcpauthencr_end;
+
+ result = HDMI_RESULT_OK;
+
+store_hdcpauthencr_end:
+ hdev->sysfs_data.authencr.result = result;
+ return count;
+}
+
+static ssize_t show_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int len;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* result */
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.authencr.result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.authencr.result;
+
+ dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.authencr.result);
+
+ /* resp_size */
+ len = hdev->sysfs_data.authencr.buf_len;
+ if (len > AUTH_BUF_LEN)
+ len = AUTH_BUF_LEN;
+ dev_dbg(hdev->dev, "resp_size:%d\n", len);
+
+ /* resp */
+ cnt = 0;
+ while (cnt < len) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.authencr.buf[cnt]);
+ index += 2;
+
+ dev_dbg(hdev->dev, "%02x ",
+ hdev->sysfs_data.authencr.buf[cnt]);
+
+ } else
+ *(buf + index++) = hdev->sysfs_data.authencr.buf[cnt];
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t show_hdcpstateget(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 hdcp_state;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL, &hdcp_state))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", hdcp_state);
+ index += 2;
+ } else
+ *(buf + index++) = hdcp_state;
+
+ dev_dbg(hdev->dev, "status:%02x\n", hdcp_state);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t show_evread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ u8 ev;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ ev = events_read(hdev);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", ev);
+ index += 2;
+ } else
+ *(buf + index++) = ev;
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ /* Events are read: clear events */
+ events_clear(hdev, EVENTS_MASK);
+
+ return index;
+}
+
+static ssize_t store_evclr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 ev;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_EVCLR_TEXT_SIZE) &&
+ (count != HDMI_EVCLR_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ ev = htoi(&buf[index]);
+ index += 2;
+ } else {
+ if (count != HDMI_EVCLR_BIN_SIZE)
+ return -EINVAL;
+
+ ev = *(buf + index++);
+ }
+
+ events_clear(hdev, ev);
+
+ return count;
+}
+
+static ssize_t store_audiocfg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct audio_cfg audio_cfg;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_AUDIOCFG_TEXT_SIZE) &&
+ (count != HDMI_AUDIOCFG_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ audio_cfg.if_format = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.i2s_entries = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.freq = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.word_length = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.format = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.if_mode = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.mute = htoi(&buf[index]);
+ index += 2;
+ } else {
+ if (count != HDMI_AUDIOCFG_BIN_SIZE)
+ return -EINVAL;
+
+ audio_cfg.if_format = *(buf + index++);
+ audio_cfg.i2s_entries = *(buf + index++);
+ audio_cfg.freq = *(buf + index++);
+ audio_cfg.word_length = *(buf + index++);
+ audio_cfg.format = *(buf + index++);
+ audio_cfg.if_mode = *(buf + index++);
+ audio_cfg.mute = *(buf + index++);
+ }
+
+ audiocfg(hdev, &audio_cfg);
+
+ return count;
+}
+
+static ssize_t show_plugstatus(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ struct av8100_status av8100_status;
+ u8 plstat;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ av8100_status = av8100_status_get();
+ plstat = av8100_status.av8100_plugin_status == AV8100_HDMI_PLUGIN;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", plstat);
+ index += 2;
+ } else
+ *(buf + index++) = plstat;
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_poweronoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_POWERONOFF_TEXT_SIZE) &&
+ (count != HDMI_POWERONOFF_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_POWERONOFF_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ if (enable == 0) {
+ if (av8100_powerdown() != 0) {
+ dev_err(hdev->dev, "av8100_powerdown FAIL\n");
+ return -EINVAL;
+ }
+ } else {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ return count;
+}
+
+static ssize_t show_poweronoff(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ struct av8100_status status;
+ u8 power_state;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_SCAN)
+ power_state = 0;
+ else
+ power_state = 1;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", power_state);
+ index += 3;
+ } else {
+ *(buf + index++) = power_state;
+ }
+
+ return index;
+}
+
+static ssize_t store_evwakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ event_wakeup(hdev);
+
+ return count;
+}
+
+static int hdmi_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int hdmi_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/* ioctl */
+static long hdmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ u8 value = 0;
+ struct hdmi_register reg;
+ struct av8100_status status;
+ u8 aes_status;
+	struct hdmi_device *hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+
+	if (!hdev)
+		return -EFAULT;
+
+	switch (cmd) {
+ case IOC_PLUG_DETECT_ENABLE:
+ {
+ struct plug_detect plug_detect;
+
+ if (copy_from_user(&plug_detect, (void *)arg,
+ sizeof(struct plug_detect)))
+ return -EINVAL;
+
+ if (plugdeten(hdev, &plug_detect))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_EDID_READ:
+ {
+ struct edid_read edid_read;
+
+ if (copy_from_user(&edid_read, (void *)arg,
+ sizeof(struct edid_read)))
+ return -EINVAL;
+
+ if (edidread(hdev, &edid_read, &edid_read.data_length,
+ edid_read.data))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&edid_read,
+ sizeof(struct edid_read))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_CEC_EVENT_ENABLE:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ event_enable(hdev, value != 0,
+ HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR |
+ HDMI_EVENT_CECTX);
+ break;
+
+ case IOC_CEC_READ:
+ {
+ struct cec_rw cec_read;
+
+ if (cecread(hdev, &cec_read.src, &cec_read.dest,
+ &cec_read.length, cec_read.data))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&cec_read,
+ sizeof(struct cec_rw))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_CEC_SEND:
+ {
+ struct cec_rw cec_send;
+
+ if (copy_from_user(&cec_send, (void *)arg,
+ sizeof(struct cec_rw)))
+ return -EINVAL;
+
+ if (cecsend(hdev, cec_send.src, cec_send.dest, cec_send.length,
+ cec_send.data))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_INFOFRAME_SEND:
+ {
+ struct info_fr info_fr;
+
+ if (copy_from_user(&info_fr, (void *)arg,
+ sizeof(struct info_fr)))
+ return -EINVAL;
+
+ if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc,
+ info_fr.length, info_fr.data))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_EVENT_ENABLE:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ event_enable(hdev, value != 0, HDMI_EVENT_HDCP);
+ break;
+
+ case IOC_HDCP_CHKAESOTP:
+ if (hdcpchkaesotp(hdev, &value, &aes_status))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&aes_status,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_FUSEAES:
+ {
+ struct hdcp_fuseaes hdcp_fuseaes;
+
+ if (copy_from_user(&hdcp_fuseaes, (void *)arg,
+ sizeof(struct hdcp_fuseaes)))
+ return -EINVAL;
+
+ if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc,
+ &hdcp_fuseaes.result))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&hdcp_fuseaes,
+ sizeof(struct hdcp_fuseaes))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDCP_LOADAES:
+ {
+ int block_cnt;
+ struct hdcp_loadaesone hdcp_loadaesone;
+ struct hdcp_loadaesall hdcp_loadaesall;
+
+ if (copy_from_user(&hdcp_loadaesall, (void *)arg,
+ sizeof(struct hdcp_loadaesall)))
+ return -EINVAL;
+
+ if (hdcpchkaesotp(hdev, &value, &aes_status))
+ return -EINVAL;
+
+ if (!aes_status) {
+ /* AES is not fused */
+ hdcp_loadaesone.result = HDMI_AES_NOT_FUSED;
+ goto ioc_hdcploadaes_err;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ memcpy(hdcp_loadaesone.key, hdcp_loadaesall.key +
+ block_cnt * HDMI_HDCP_AES_KEYSIZE,
+ HDMI_HDCP_AES_KEYSIZE);
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaesone.key,
+ &hdcp_loadaesone.result,
+ hdcp_loadaesone.crc32))
+ return -EINVAL;
+
+ if (hdcp_loadaesone.result)
+ return -EINVAL;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaesone.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ memcpy(hdcp_loadaesone.key + HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaesall.ksv, HDMI_HDCP_AES_KSVSIZE);
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaesone.key,
+ &hdcp_loadaesone.result,
+ NULL))
+ return -EINVAL;
+
+ if (hdcp_loadaesone.result)
+ return -EINVAL;
+
+ /* CRC32 */
+ if (memcmp(hdcp_loadaesall.crc32, hdcp_loadaesone.crc32,
+ CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaesall.crc32[0],
+ hdcp_loadaesall.crc32[1],
+ hdcp_loadaesall.crc32[2],
+ hdcp_loadaesall.crc32[3]);
+ hdcp_loadaesone.result = HDMI_RESULT_CRC_MISMATCH;
+ goto ioc_hdcploadaes_err;
+ }
+
+ioc_hdcploadaes_err:
+ hdcp_loadaesall.result = hdcp_loadaesone.result;
+
+ if (copy_to_user((void *)arg, (void *)&hdcp_loadaesall,
+ sizeof(struct hdcp_loadaesall))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDCP_AUTHENCR_REQ:
+ {
+ struct hdcp_authencr hdcp_authencr;
+ int result = HDMI_RESULT_NOT_OK;
+
+ u8 buf[AUTH_BUF_LEN];
+
+ if (copy_from_user(&hdcp_authencr, (void *)arg,
+ sizeof(struct hdcp_authencr)))
+ return -EINVAL;
+
+ /* Default not OK */
+ hdcp_authencr.resp_size = 0;
+
+ if (hdcpchkaesotp(hdev, &value, &aes_status)) {
+ result = HDMI_AES_NOT_FUSED;
+ goto hdcp_authencr_end;
+ }
+
+ if (!aes_status) {
+ /* AES is not fused */
+ result = HDMI_AES_NOT_FUSED;
+ goto hdcp_authencr_end;
+ }
+
+ if (hdcpauthencr(hdev, hdcp_authencr.auth_type,
+ hdcp_authencr.encr_type,
+ &value,
+ buf)) {
+ result = HDMI_RESULT_NOT_OK;
+ goto hdcp_authencr_end;
+ }
+
+ if (value > AUTH_BUF_LEN)
+ value = AUTH_BUF_LEN;
+
+ result = HDMI_RESULT_OK;
+ hdcp_authencr.resp_size = value;
+ memcpy(hdcp_authencr.resp, buf, value);
+
+hdcp_authencr_end:
+ hdcp_authencr.result = result;
+ if (copy_to_user((void *)arg, (void *)&hdcp_authencr,
+ sizeof(struct hdcp_authencr)))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_STATE_GET:
+ if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL,
+ &value))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_EVENTS_READ:
+ value = events_read(hdev);
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+
+ /* Events are read: clear events */
+ events_clear(hdev, EVENTS_MASK);
+ break;
+
+ case IOC_EVENTS_CLEAR:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ events_clear(hdev, value);
+ break;
+
+ case IOC_AUDIO_CFG:
+ {
+ struct audio_cfg audio_cfg;
+
+ if (copy_from_user(&audio_cfg, (void *)arg,
+ sizeof(struct audio_cfg)))
+ return -EINVAL;
+
+ audiocfg(hdev, &audio_cfg);
+ }
+ break;
+
+ case IOC_PLUG_STATUS:
+ status = av8100_status_get();
+ value = status.av8100_plugin_status == AV8100_HDMI_PLUGIN;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_POWERONOFF:
+ /* Get desired power state on or off */
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ if (value == 0) {
+ if (av8100_powerdown() != 0) {
+ dev_err(hdev->dev, "av8100_powerdown FAIL\n");
+ return -EINVAL;
+ }
+ } else {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup FAIL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_EVENT_WAKEUP:
+ /* Trigger event */
+ event_wakeup(hdev);
+ break;
+
+ case IOC_POWERSTATE:
+ status = av8100_status_get();
+ value = status.av8100_state >= AV8100_OPMODE_SCAN;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ /* Internal */
+ case IOC_HDMI_ENABLE_INTERRUPTS:
+ av8100_disable_interrupt();
+ if (av8100_enable_interrupt() != 0) {
+ dev_err(hdev->dev, "av8100_ei FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_DOWNLOAD_FW:
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_ONOFF:
+ {
+ union av8100_configuration config;
+
+ /* Get desired HDMI mode on or off */
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EFAULT;
+
+ if (av8100_conf_get(AV8100_COMMAND_HDMI, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_get FAIL\n");
+ return -EINVAL;
+ }
+ if (value == 0)
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF;
+ else
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+
+ if (av8100_conf_prep(AV8100_COMMAND_HDMI, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+ if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDMI_REGISTER_WRITE:
+ if (copy_from_user(&reg, (void *)arg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+
+ if (av8100_reg_w(reg.offset, reg.value) != 0) {
+ dev_err(hdev->dev, "hdmi_register_write FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_REGISTER_READ:
+ if (copy_from_user(&reg, (void *)arg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+
+ if (av8100_reg_r(reg.offset, &reg.value) != 0) {
+			dev_err(hdev->dev, "hdmi_register_read FAIL\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void *)arg, (void *)&reg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_STATUS_GET:
+ status = av8100_status_get();
+
+ if (copy_to_user((void *)arg, (void *)&status,
+ sizeof(struct av8100_status))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_CONFIGURATION_WRITE:
+ {
+ struct hdmi_command_register command_reg;
+
+ if (copy_from_user(&command_reg, (void *)arg,
+ sizeof(struct hdmi_command_register)) != 0) {
+ dev_err(hdev->dev, "IOC_HDMI_CONFIGURATION_WRITE "
+ "fail 1\n");
+ command_reg.return_status = EINVAL;
+ } else {
+ command_reg.return_status = 0;
+ if (av8100_conf_w_raw(command_reg.cmd_id,
+ command_reg.buf_len,
+ command_reg.buf,
+ &(command_reg.buf_len),
+ command_reg.buf) != 0) {
+ dev_err(hdev->dev,
+ "IOC_HDMI_CONFIGURATION_WRITE "
+ "fail 2\n");
+ command_reg.return_status = EINVAL;
+ }
+ }
+
+ if (copy_to_user((void *)arg, (void *)&command_reg,
+ sizeof(struct hdmi_command_register)) != 0) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned int
+hdmi_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct hdmi_device *hdev;
+
+ hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+ if (!hdev)
+ return 0;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+	poll_wait(filp, &hdev->event_wq, wait);
+
+ LOCK_HDMI_EVENTS;
+	if (hdev->events_received) {
+ hdev->events_received = false;
+ mask = POLLIN | POLLRDNORM;
+ }
+ UNLOCK_HDMI_EVENTS;
+
+ return mask;
+}
+
+static const struct file_operations hdmi_fops = {
+ .owner = THIS_MODULE,
+ .open = hdmi_open,
+ .release = hdmi_release,
+ .unlocked_ioctl = hdmi_ioctl,
+ .poll = hdmi_poll
+};
+
+/* Event callback function called by hw driver */
+void hdmi_event(enum av8100_hdmi_event ev)
+{
+ int events_old;
+ int events_new;
+ struct hdmi_device *hdev;
+ struct kobject *kobj;
+
+ hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+ if (!hdev)
+ return;
+
+ dev_dbg(hdev->dev, "hdmi_event %02x\n", ev);
+
+ kobj = &(hdev->dev->kobj);
+
+ LOCK_HDMI_EVENTS;
+
+ events_old = hdev->events;
+
+ /* Set event */
+ switch (ev) {
+ case AV8100_HDMI_EVENT_HDMI_PLUGIN:
+ hdev->events &= ~HDMI_EVENT_HDMI_PLUGOUT;
+ hdev->events |= HDMI_EVENT_HDMI_PLUGIN;
+ break;
+
+ case AV8100_HDMI_EVENT_HDMI_PLUGOUT:
+ hdev->events &= ~HDMI_EVENT_HDMI_PLUGIN;
+ hdev->events |= HDMI_EVENT_HDMI_PLUGOUT;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ case AV8100_HDMI_EVENT_CEC:
+ hdev->events |= HDMI_EVENT_CEC;
+ break;
+
+ case AV8100_HDMI_EVENT_HDCP:
+ hdev->events |= HDMI_EVENT_HDCP;
+ break;
+
+ case AV8100_HDMI_EVENT_CECTXERR:
+ hdev->events |= HDMI_EVENT_CECTXERR;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ case AV8100_HDMI_EVENT_CECTX:
+ hdev->events |= HDMI_EVENT_CECTX;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ default:
+ break;
+ }
+
+ events_new = hdev->events_mask & hdev->events;
+
+ UNLOCK_HDMI_EVENTS;
+
+ dev_dbg(hdev->dev, "hdmi events:%02x, events_old:%02x mask:%02x\n",
+ events_new, events_old, hdev->events_mask);
+
+ if (events_new != events_old) {
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ wake_up_interruptible(&hdev->event_wq);
+ }
+}
+EXPORT_SYMBOL(hdmi_event);
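+
+/*
+ * Illustrative user-space sketch (not part of this driver, assumes the
+ * IOC_* definitions from the public hdmi header): a client sleeps in
+ * poll() on /dev/hdmi and then reads and clears the pending events with
+ * IOC_EVENTS_READ:
+ *
+ *	int fd = open("/dev/hdmi", O_RDWR);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	unsigned char ev;
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		ioctl(fd, IOC_EVENTS_READ, &ev);
+ */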
+
+int hdmi_device_register(struct hdmi_device *hdev)
+{
+ hdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ hdev->miscdev.name = "hdmi";
+ hdev->miscdev.fops = &hdmi_fops;
+
+ if (misc_register(&hdev->miscdev)) {
+ pr_err("hdmi misc_register failed\n");
+ return -EFAULT;
+ }
+
+ hdev->dev = hdev->miscdev.this_device;
+
+ return 0;
+}
+
+int __init hdmi_init(void)
+{
+ struct hdmi_device *hdev;
+ int i;
+ int ret;
+
+ /* Allocate device data */
+ hdev = kzalloc(sizeof(struct hdmi_device), GFP_KERNEL);
+ if (!hdev) {
+ pr_err("%s: Alloc failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Add to list */
+ list_add_tail(&hdev->list, &hdmi_device_list);
+
+	if (hdmi_device_register(hdev)) {
+		pr_err("%s: hdmi_device_register failed\n", __func__);
+		list_del(&hdev->list);
+		kfree(hdev);
+		return -EFAULT;
+	}
+
+ hdev->devnr = HDMI_DEVNR_DEFAULT;
+
+ /* Default sysfs file format is hextext */
+ hdev->sysfs_data.store_as_hextext = true;
+
+ init_waitqueue_head(&hdev->event_wq);
+
+ /* Create sysfs attrs */
+ for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++) {
+ ret = device_create_file(hdev->dev, &hdmi_sysfs_attrs[i]);
+ if (ret)
+ dev_err(hdev->dev,
+ "Unable to create sysfs attr %s (%d)\n",
+ hdmi_sysfs_attrs[i].attr.name, ret);
+ }
+
+ /* Register event callback */
+ av8100_hdmi_event_cb_set(hdmi_event);
+
+ return 0;
+}
+late_initcall(hdmi_init);
+
+void hdmi_exit(void)
+{
+ struct hdmi_device *hdev = NULL;
+ int i;
+
+ if (list_empty(&hdmi_device_list))
+ return;
+ else
+ hdev = list_entry(hdmi_device_list.next,
+ struct hdmi_device, list);
+
+ /* Deregister event callback */
+ av8100_hdmi_event_cb_set(NULL);
+
+ /* Remove sysfs attrs */
+ for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++)
+ device_remove_file(hdev->dev, &hdmi_sysfs_attrs[i]);
+
+ misc_deregister(&hdev->miscdev);
+
+ /* Remove from list */
+ list_del(&hdev->list);
+
+ /* Free device data */
+ kfree(hdev);
+}
diff --git a/drivers/video/av8100/hdmi_loc.h b/drivers/video/av8100/hdmi_loc.h
new file mode 100644
index 00000000000..20314910db9
--- /dev/null
+++ b/drivers/video/av8100/hdmi_loc.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __HDMI_LOC__H__
+#define __HDMI_LOC__H__
+
+#define EDID_BUF_LEN 128
+#define COMMAND_BUF_LEN 128
+#define AES_KEY_SIZE 16
+#define CRC32_SIZE 4
+#define AUTH_BUF_LEN 126
+#define CECTX_TRY 20
+#define CECTX_WAITTIME 25
+
+struct edid_data {
+ u8 buf_len;
+ u8 buf[EDID_BUF_LEN];
+};
+
+struct authencr {
+ int result;
+ u8 buf_len;
+ u8 buf[AUTH_BUF_LEN];
+};
+
+struct hdmi_register {
+ unsigned char value;
+ unsigned char offset;
+};
+
+struct hdcp_loadaesone {
+ u8 key[AES_KEY_SIZE];
+ u8 result;
+ u8 crc32[CRC32_SIZE];
+};
+
+struct hdmi_sysfs_data {
+ bool store_as_hextext;
+ struct plug_detect plug_detect;
+ bool enable_cec_event;
+ struct edid_data edid_data;
+ struct cec_rw cec_read;
+ bool fuse_result;
+ int loadaes_result;
+ struct authencr authencr;
+};
+
+struct hdmi_command_register {
+ unsigned char cmd_id; /* input */
+ unsigned char buf_len; /* input, output */
+ unsigned char buf[COMMAND_BUF_LEN]; /* input, output */
+ unsigned char return_status; /* output */
+};
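+
+/*
+ * Used by IOC_HDMI_CONFIGURATION_WRITE: the caller fills in cmd_id, buf_len
+ * and buf; the driver forwards the buffer to av8100_conf_w_raw() and returns
+ * any response data in the same buf/buf_len, with return_status set to 0 on
+ * success and EINVAL on failure.
+ */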
+
+enum cec_tx_status_action {
+ CEC_TX_SET_FREE,
+ CEC_TX_SET_BUSY,
+ CEC_TX_CHECK
+};
+
+/* Internal */
+#define IOC_HDMI_ENABLE_INTERRUPTS _IOWR(HDMI_IOC_MAGIC, 32, int)
+#define IOC_HDMI_DOWNLOAD_FW _IOWR(HDMI_IOC_MAGIC, 33, int)
+#define IOC_HDMI_ONOFF _IOWR(HDMI_IOC_MAGIC, 34, int)
+#define IOC_HDMI_REGISTER_WRITE _IOWR(HDMI_IOC_MAGIC, 35, int)
+#define IOC_HDMI_REGISTER_READ _IOWR(HDMI_IOC_MAGIC, 36, int)
+#define IOC_HDMI_STATUS_GET _IOWR(HDMI_IOC_MAGIC, 37, int)
+#define IOC_HDMI_CONFIGURATION_WRITE _IOWR(HDMI_IOC_MAGIC, 38, int)
+
+#endif /* __HDMI_LOC__H__ */
diff --git a/drivers/video/b2r2/Kconfig b/drivers/video/b2r2/Kconfig
new file mode 100644
index 00000000000..8cc81876de7
--- /dev/null
+++ b/drivers/video/b2r2/Kconfig
@@ -0,0 +1,134 @@
+config FB_B2R2
+ tristate "B2R2 engine support"
+ default n
+ help
+	  The B2R2 engine performs various bit-blitting, post-processing and
+	  composition operations.
+
+config B2R2_PLUG_CONF
+ bool "B2R2 bus plug configuration"
+ depends on FB_B2R2
+ default n
+ help
+	  Configures how B2R2 accesses the memory bus. Enabling this will
+	  increase the performance of B2R2 at the cost of using the bus more
+	  heavily.
+
+	  If this is set to 'n', the hardware defaults will be used. An example
+	  .config fragment using the default sizes is shown below the size
+	  choices.
+
+choice
+ prompt "Opcode size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_OPSIZE_64
+
+ config B2R2_OPSIZE_8
+ bool "8 bytes"
+ config B2R2_OPSIZE_16
+ bool "16 bytes"
+ config B2R2_OPSIZE_32
+ bool "32 bytes"
+ config B2R2_OPSIZE_64
+ bool "64 bytes"
+
+endchoice
+
+choice
+ prompt "Chunk size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_CHSIZE_128
+
+ config B2R2_CHSIZE_1
+ bool "1 op"
+ config B2R2_CHSIZE_2
+ bool "2 ops"
+ config B2R2_CHSIZE_4
+ bool "4 ops"
+ config B2R2_CHSIZE_8
+ bool "8 ops"
+ config B2R2_CHSIZE_16
+ bool "16 ops"
+ config B2R2_CHSIZE_32
+ bool "32 ops"
+ config B2R2_CHSIZE_64
+ bool "64 ops"
+ config B2R2_CHSIZE_128
+ bool "128 ops"
+endchoice
+
+choice
+ prompt "Message size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_MGSIZE_128
+
+ config B2R2_MGSIZE_1
+ bool "1 chunk"
+ config B2R2_MGSIZE_2
+ bool "2 chunks"
+ config B2R2_MGSIZE_4
+ bool "4 chunks"
+ config B2R2_MGSIZE_8
+		bool "8 chunks"
+ config B2R2_MGSIZE_16
+ bool "16 chunks"
+ config B2R2_MGSIZE_32
+ bool "32 chunks"
+ config B2R2_MGSIZE_64
+ bool "64 chunks"
+ config B2R2_MGSIZE_128
+ bool "128 chunks"
+endchoice
+
+choice
+ prompt "Page size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_PGSIZE_256
+
+ config B2R2_PGSIZE_64
+ bool "64 bytes"
+ config B2R2_PGSIZE_128
+ bool "128 bytes"
+ config B2R2_PGSIZE_256
+ bool "256 bytes"
+endchoice
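+
+# Example (hypothetical) .config fragment enabling the bus plug configuration
+# with the default sizes selected above:
+#
+#   CONFIG_FB_B2R2=y
+#   CONFIG_B2R2_PLUG_CONF=y
+#   CONFIG_B2R2_OPSIZE_64=y
+#   CONFIG_B2R2_CHSIZE_128=y
+#   CONFIG_B2R2_MGSIZE_128=y
+#   CONFIG_B2R2_PGSIZE_256=y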
+
+config B2R2_DEBUG
+ bool "B2R2 debugging"
+ default n
+ depends on FB_B2R2
+ help
+ Enable debugging features for the B2R2 driver.
+
+config B2R2_PROFILER
+ tristate "B2R2 profiler"
+ default n
+ depends on FB_B2R2
+ help
+ Enables the profiler for the B2R2 driver.
+
+ It is recommended to build this as a module, since the configuration
+ of filters etc. is done at load time.
+
+config B2R2_GENERIC
+ bool "B2R2 generic path"
+ default y
+ depends on FB_B2R2
+ help
+ Enables support for the generic path in the B2R2 driver. This path should
+ be used when there is no optimized implementation for a request.
+
+choice
+ prompt "Generic usage mode"
+ depends on B2R2_GENERIC
+ default B2R2_GENERIC_FALLBACK
+
+ config B2R2_GENERIC_FALLBACK
+ bool "Fallback"
+ help
+ The optimized path will be used for all supported operations, and the
+ generic path will be used as a fallback for the ones not implemented.
+
+ config B2R2_GENERIC_ONLY
+ bool "Always"
+ help
+ The generic path will be used for all operations.
+
+endchoice
diff --git a/drivers/video/b2r2/Makefile b/drivers/video/b2r2/Makefile
new file mode 100644
index 00000000000..0150ad6f761
--- /dev/null
+++ b/drivers/video/b2r2/Makefile
@@ -0,0 +1,15 @@
+# Makefile for building the B2R2 driver (built-in or as a loadable module)
+
+obj-$(CONFIG_FB_B2R2) += b2r2.o
+
+b2r2-objs = b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o b2r2_generic.o \
+	b2r2_node_gen.o b2r2_node_split.o b2r2_profiler_socket.o \
+	b2r2_timing.o b2r2_filters.o b2r2_utils.o b2r2_input_validation.o
+
+ifdef CONFIG_B2R2_DEBUG
+b2r2-objs += b2r2_debug.o
+endif
+
+ifeq ($(CONFIG_FB_B2R2),m)
+obj-y += b2r2_kernel_if.o
+endif
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler/
diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c
new file mode 100644
index 00000000000..5d1d58c53cc
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_blt_main.c
@@ -0,0 +1,3322 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 Blitter module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/hwmem.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_node_split.h"
+#include "b2r2_generic.h"
+#include "b2r2_mem_alloc.h"
+#include "b2r2_profiler_socket.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+#include "b2r2_input_validation.h"
+
+#define B2R2_HEAP_SIZE (4 * PAGE_SIZE)
+#define MAX_TMP_BUF_SIZE (128 * PAGE_SIZE)
+
+/*
+ * TODO:
+ * Implementation of query cap
+ * Support for user space virtual pointer to physically consecutive memory
+ * Support for user space virtual pointer to physically scattered memory
+ * Callback reads lagging behind in blt_api_stress app
+ * Store smaller items in the report list instead of the whole request
+ * Support read of many report records at once.
+ */
+
+/**
+ * b2r2_blt_dev - Our device, /dev/b2r2_blt
+ */
+static struct miscdevice *b2r2_blt_dev;
+
+static struct {
+ struct b2r2_work_buf buf;
+ bool in_use;
+} tmp_bufs[MAX_TMP_BUFS_NEEDED];
+
+/* Statistics */
+
+/**
+ * stat_lock - Spin lock protecting the statistics
+ */
+static struct mutex stat_lock;
+/**
+ * stat_n_jobs_added - Number of jobs added to b2r2_core
+ */
+static unsigned long stat_n_jobs_added;
+/**
+ * stat_n_jobs_released - Number of jobs released (job_release called)
+ */
+static unsigned long stat_n_jobs_released;
+/**
+ * stat_n_jobs_in_report_list - Number of jobs currently in the report list
+ */
+static unsigned long stat_n_jobs_in_report_list;
+/**
+ * stat_n_in_blt - Number of client threads currently exec inside b2r2_blt()
+ */
+static unsigned long stat_n_in_blt;
+/**
+ * stat_n_in_blt_synch - Number of client threads currently waiting for synch
+ */
+static unsigned long stat_n_in_blt_synch;
+/**
+ * stat_n_in_blt_add - Number of client threads currently adding a job in b2r2_blt
+ */
+static unsigned long stat_n_in_blt_add;
+/**
+ * stat_n_in_blt_wait - Number of client threads currently waiting in b2r2_blt
+ */
+static unsigned long stat_n_in_blt_wait;
+/**
+ * stat_n_in_synch_0 - Number of client threads currently in b2r2_blt_synch
+ * waiting for all client jobs to finish
+ */
+static unsigned long stat_n_in_synch_0;
+/**
+ * stat_n_in_synch_job - Number of client threads currently in b2r2_blt_synch
+ *			waiting for a specific job to finish
+ */
+static unsigned long stat_n_in_synch_job;
+/**
+ * stat_n_in_query_cap - Number of clients currently in query cap
+ */
+static unsigned long stat_n_in_query_cap;
+/**
+ * stat_n_in_open - Number of clients currently in b2r2_blt_open
+ */
+static unsigned long stat_n_in_open;
+/**
+ * stat_n_in_release - Number of clients currently in b2r2_blt_release
+ */
+static unsigned long stat_n_in_release;
+
+/* Debug file system support */
+#ifdef CONFIG_DEBUG_FS
+/**
+ * debugfs_latest_request - Copy of the latest request issued
+ */
+struct b2r2_blt_request debugfs_latest_request;
+/**
+ * debugfs_root_dir - The debugfs root directory, i.e. /debugfs/b2r2
+ */
+static struct dentry *debugfs_root_dir;
+
+static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size);
+#endif
+
+/* Local functions */
+static void inc_stat(unsigned long *stat);
+static void dec_stat(unsigned long *stat);
+static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
+ int request_id);
+static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_query_cap *query_cap);
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+static int b2r2_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request);
+
+static void job_callback(struct b2r2_core_job *job);
+static void job_release(struct b2r2_core_job *job);
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources(struct b2r2_core_job *job, bool atomic);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC
+static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request);
+
+static void job_callback_gen(struct b2r2_core_job *job);
+static void job_release_gen(struct b2r2_core_job *job);
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void tile_job_callback_gen(struct b2r2_core_job *job);
+static void tile_job_release_gen(struct b2r2_core_job *job);
+#endif
+
+
+static int resolve_buf(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved);
+static void unresolve_buf(struct b2r2_blt_buf *buf,
+ struct b2r2_resolved_buf *resolved);
+static void sync_buf(struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved,
+ bool is_dst,
+ struct b2r2_blt_rect *rect);
+static bool is_report_list_empty(struct b2r2_blt_instance *instance);
+static bool is_synching(struct b2r2_blt_instance *instance);
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect);
+static void set_up_hwmem_region(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect, struct hwmem_region *region);
+static int resolve_hwmem(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used, bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf);
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf);
+
+/**
+ * struct sync_args - Data for clean/flush
+ *
+ * @start: Virtual start address
+ * @end: Virtual end address
+ */
+struct sync_args {
+ unsigned long start;
+ unsigned long end;
+};
+/**
+ * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the current CPU
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void flush_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_flush_range((void *)sa->start, (void *)sa->end);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * flush_l1_cache_range_all_cpus() - Cleans and invalidates L1 cache on all CPUs
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void flush_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(flush_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
+
+/**
+ * clean_l1_cache_range_curr_cpu() - Cleans L1 cache on current CPU
+ *
+ * Ensures that data is written out from the CPU's L1 cache;
+ * it will still be in the cache.
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void clean_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_map_area((void *)sa->start,
+ (void *)sa->end - (void *)sa->start,
+ DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * clean_l1_cache_range_all_cpus() - Cleans L1 cache on all CPUs
+ *
+ * Ensures that data is written out from the L1 cache of all CPUs;
+ * it will still be in the cache.
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void clean_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(clean_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
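+
+/*
+ * Illustrative sketch (not called as-is anywhere in this file): cleaning a
+ * buffer range before B2R2 reads it, where start/end are assumed to be valid
+ * kernel virtual addresses covering the buffer.
+ *
+ *	struct sync_args sa = { .start = start, .end = end };
+ *
+ * #ifdef CONFIG_SMP
+ *	clean_l1_cache_range_all_cpus(&sa);
+ * #else
+ *	clean_l1_cache_range_curr_cpu(&sa);
+ * #endif
+ */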
+
+/**
+ * b2r2_blt_open - Implements file open on the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * A B2R2 BLT instance is created and stored in the file structure.
+ */
+static int b2r2_blt_open(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+
+ b2r2_log_info("%s\n", __func__);
+
+ inc_stat(&stat_n_in_open);
+
+ /* Allocate and initialize the instance */
+ instance = (struct b2r2_blt_instance *)
+ kmalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance) {
+		b2r2_log_err("%s: Failed to alloc\n", __func__);
+		ret = -ENOMEM;
+		goto instance_alloc_failed;
+	}
+ memset(instance, 0, sizeof(*instance));
+ INIT_LIST_HEAD(&instance->report_list);
+ mutex_init(&instance->lock);
+ init_waitqueue_head(&instance->report_list_waitq);
+ init_waitqueue_head(&instance->synch_done_waitq);
+
+ /*
+ * Remember the instance so that we can retrieve it in
+ * other functions
+ */
+ filp->private_data = instance;
+ goto out;
+
+instance_alloc_failed:
+out:
+ dec_stat(&stat_n_in_open);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_release - Implements last close on an instance of
+ * the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * All active jobs are finished or cancelled and allocated data
+ * is released.
+ */
+static int b2r2_blt_release(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct b2r2_blt_instance *instance;
+
+ b2r2_log_info("%s\n", __func__);
+
+ inc_stat(&stat_n_in_release);
+
+ instance = (struct b2r2_blt_instance *) filp->private_data;
+
+ /* Finish all outstanding requests */
+ ret = b2r2_blt_synch(instance, 0);
+ if (ret < 0)
+ b2r2_log_warn(
+			"%s: b2r2_blt_synch failed with %d\n", __func__, ret);
+
+ /* Now cancel any remaining outstanding request */
+ if (instance->no_of_active_requests) {
+ struct b2r2_core_job *job;
+
+ b2r2_log_warn("%s: %d active requests\n",
+ __func__, instance->no_of_active_requests);
+
+ /* Find and cancel all jobs belonging to us */
+ job = b2r2_core_job_find_first_with_tag((int) instance);
+ while (job) {
+ b2r2_core_job_cancel(job);
+ /* Matches addref in b2r2_core_job_find... */
+ b2r2_core_job_release(job, __func__);
+ job = b2r2_core_job_find_first_with_tag((int) instance);
+ }
+
+ b2r2_log_warn(
+ "%s: %d active requests after cancel\n",
+ __func__, instance->no_of_active_requests);
+ }
+
+ /* Release jobs in report list */
+ mutex_lock(&instance->lock);
+ while (!list_empty(&instance->report_list)) {
+ struct b2r2_blt_request *request = list_first_entry(
+ &instance->report_list,
+ struct b2r2_blt_request,
+ list);
+ list_del_init(&request->list);
+ mutex_unlock(&instance->lock);
+ /*
+ * This release matches the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ mutex_lock(&instance->lock);
+ }
+ mutex_unlock(&instance->lock);
+
+ /* Release our instance */
+ kfree(instance);
+
+ dec_stat(&stat_n_in_release);
+
+ return 0;
+}
+
+/**
+ * b2r2_blt_ioctl - This routine implements b2r2_blt ioctl interface
+ *
+ * @file: file pointer.
+ * @cmd: ioctl command.
+ * @arg: input argument for ioctl.
+ *
+ * Returns 0 if OK else negative error code
+ */
+static long b2r2_blt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+
+	/* Process actual ioctl */
+
+ b2r2_log_info("%s\n", __func__);
+
+ /* Get the instance from the file structure */
+ instance = (struct b2r2_blt_instance *) file->private_data;
+
+ switch (cmd) {
+ case B2R2_BLT_IOC: {
+ /* This is the "blit" command */
+
+ /* arg is user pointer to struct b2r2_blt_request */
+ struct b2r2_blt_request *request =
+ kmalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ b2r2_log_err("%s: Failed to alloc mem\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Initialize the structure */
+ memset(request, 0, sizeof(*request));
+ INIT_LIST_HEAD(&request->list);
+ request->instance = instance;
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&request->user_req, (void *)arg,
+ sizeof(request->user_req))) {
+ b2r2_log_err(
+ "%s: copy_from_user failed\n",
+ __func__);
+ kfree(request);
+ return -EFAULT;
+ }
+
+ if (!b2r2_validate_user_req(&request->user_req)) {
+ kfree(request);
+ return -EINVAL;
+ }
+
+ request->profile = is_profiler_registered_approx();
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((request->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ request->clut = dma_alloc_coherent(b2r2_blt_device(),
+ CLUT_SIZE, &(request->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request->clut == NULL) {
+ b2r2_log_err("%s CLUT allocation failed.\n",
+ __func__);
+ kfree(request);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request->clut,
+ request->user_req.clut, CLUT_SIZE)) {
+ b2r2_log_err("%s: CLUT copy_from_user failed\n",
+ __func__);
+ dma_free_coherent(b2r2_blt_device(), CLUT_SIZE,
+ request->clut, request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ kfree(request);
+ return -EFAULT;
+ }
+ }
+
+ /* Perform the blit */
+
+#ifdef CONFIG_B2R2_GENERIC_ONLY
+ /* Use the generic path for all operations */
+ ret = b2r2_generic_blt(instance, request);
+#else
+ /* Use the optimized path */
+ ret = b2r2_blt(instance, request);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC_FALLBACK
+ /* Fall back to generic path if operation was not supported */
+ if (ret == -ENOSYS) {
+ struct b2r2_blt_request *request_gen;
+ b2r2_log_info("b2r2_blt=%d Going generic.\n", ret);
+ request_gen = kmalloc(sizeof(*request_gen), GFP_KERNEL);
+ if (!request_gen) {
+ b2r2_log_err("%s: Failed to alloc mem for "
+ "request_gen\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Initialize the structure */
+ memset(request_gen, 0, sizeof(*request_gen));
+ INIT_LIST_HEAD(&request_gen->list);
+ request_gen->instance = instance;
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&request_gen->user_req, (void *)arg,
+ sizeof(request_gen->user_req))) {
+ b2r2_log_err(
+ "%s: copy_from_user failed\n",
+ __func__);
+ kfree(request_gen);
+ return -EFAULT;
+ }
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((request_gen->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION)
+ != 0) {
+ request_gen->clut =
+ dma_alloc_coherent(b2r2_blt_device(),
+ CLUT_SIZE,
+ &(request_gen->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request_gen->clut == NULL) {
+ b2r2_log_err("%s CLUT allocation "
+ "failed.\n", __func__);
+ kfree(request_gen);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request_gen->clut,
+ request_gen->user_req.clut,
+ CLUT_SIZE)) {
+ b2r2_log_err("%s: CLUT copy_from_user "
+ "failed\n", __func__);
+ dma_free_coherent(b2r2_blt_device(),
+ CLUT_SIZE, request_gen->clut,
+ request_gen->clut_phys_addr);
+ request_gen->clut = NULL;
+ request_gen->clut_phys_addr = 0;
+ kfree(request_gen);
+ return -EFAULT;
+ }
+ }
+
+ request_gen->profile = is_profiler_registered_approx();
+
+ ret = b2r2_generic_blt(instance, request_gen);
+ b2r2_log_info("\nb2r2_generic_blt=%d Generic done.\n",
+ ret);
+ }
+#endif /* CONFIG_B2R2_GENERIC_FALLBACK */
+
+ break;
+ }
+
+ case B2R2_BLT_SYNCH_IOC:
+ /* This is the "synch" command */
+
+ /* arg is request_id */
+ ret = b2r2_blt_synch(instance, (int) arg);
+ break;
+
+ case B2R2_BLT_QUERY_CAP_IOC:
+ {
+ /* This is the "query capabilities" command */
+
+ /* Arg is struct b2r2_blt_query_cap */
+ struct b2r2_blt_query_cap query_cap;
+
+ /* Get the user data */
+ if (copy_from_user(&query_cap, (void *)arg,
+ sizeof(query_cap))) {
+ b2r2_log_err(
+ "%s: copy_from_user failed\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* Fill in our capabilities */
+ ret = b2r2_blt_query_cap(instance, &query_cap);
+
+ /* Return data to user */
+ if (copy_to_user((void *)arg, &query_cap,
+ sizeof(query_cap))) {
+ b2r2_log_err("%s: copy_to_user failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ break;
+ }
+
+ default:
+ /* Unknown command */
+ b2r2_log_err(
+ "%s: Unknown cmd %d\n", __func__, cmd);
+ ret = -EINVAL;
+ break;
+
+ }
+
+ if (ret < 0)
+		b2r2_log_err("%s: Error %d\n", __func__, -ret);
+
+ return ret;
+}
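+
+/*
+ * Illustrative user-space sketch (not part of this driver): waiting for all
+ * outstanding requests on an instance to finish by passing request_id 0 to
+ * the synch ioctl:
+ *
+ *	ioctl(fd, B2R2_BLT_SYNCH_IOC, 0);
+ */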
+
+/**
+ * b2r2_blt_poll - Support for user-space poll, select & epoll.
+ * Used for user-space callback
+ *
+ * @filp: File to poll on
+ * @wait: Poll table to wait on
+ *
+ * This function checks if there are anything to read
+ */
+static unsigned b2r2_blt_poll(struct file *filp, poll_table *wait)
+{
+ struct b2r2_blt_instance *instance;
+ unsigned int mask = 0;
+
+ b2r2_log_info("%s\n", __func__);
+
+ /* Get the instance from the file structure */
+ instance = (struct b2r2_blt_instance *) filp->private_data;
+
+ poll_wait(filp, &instance->report_list_waitq, wait);
+ mutex_lock(&instance->lock);
+ if (!list_empty(&instance->report_list))
+ mask |= POLLIN | POLLRDNORM;
+ mutex_unlock(&instance->lock);
+
+ return mask;
+}
+
+/**
+ * b2r2_blt_read - Read report data, used for user-space callback
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_blt_request *request;
+ struct b2r2_blt_report report;
+
+ b2r2_log_info("%s\n", __func__);
+
+ /* Get the instance from the file structure */
+ instance = (struct b2r2_blt_instance *) filp->private_data;
+
+ /*
+ * We return only complete report records, one at a time.
+ * Might be more efficient to support read of many.
+ */
+ count = (count / sizeof(struct b2r2_blt_report)) *
+ sizeof(struct b2r2_blt_report);
+ if (count > sizeof(struct b2r2_blt_report))
+ count = sizeof(struct b2r2_blt_report);
+ if (count == 0)
+ return count;
+
+ /*
+ * Loop and wait here until we have anything to return or
+ * until interrupted
+ */
+ mutex_lock(&instance->lock);
+ while (list_empty(&instance->report_list)) {
+ mutex_unlock(&instance->lock);
+
+ /* Return if non blocking read */
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ b2r2_log_info("%s - Going to sleep\n", __func__);
+ if (wait_event_interruptible(
+ instance->report_list_waitq,
+ !is_report_list_empty(instance)))
+ /* signal: tell the fs layer to handle it */
+ return -ERESTARTSYS;
+
+		/* Otherwise loop, but first reacquire the lock */
+ mutex_lock(&instance->lock);
+ }
+
+ /* Ok, we have something to return */
+
+ /* Return */
+ request = NULL;
+ if (!list_empty(&instance->report_list))
+ request = list_first_entry(
+ &instance->report_list, struct b2r2_blt_request, list);
+
+ if (request) {
+ /* Remove from list to avoid reading twice */
+ list_del_init(&request->list);
+
+ report.request_id = request->request_id;
+ report.report1 = request->user_req.report1;
+ report.report2 = request->user_req.report2;
+ report.usec_elapsed = 0; /* TBD */
+
+ mutex_unlock(&instance->lock);
+ if (copy_to_user(buf,
+ &report,
+ sizeof(report)))
+ ret = -EFAULT;
+ mutex_lock(&instance->lock);
+
+ if (ret) {
+ /* copy to user failed, re-insert into list */
+ list_add(&request->list,
+ &request->instance->report_list);
+ request = NULL;
+ }
+ }
+ mutex_unlock(&instance->lock);
+
+ if (request)
+ /*
+ * Release matching the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+
+ return count;
+}
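+
+/*
+ * Illustrative user-space sketch (not part of this driver): a client that
+ * issued a request with B2R2_BLT_FLAG_REPORT_WHEN_DONE set can poll
+ * /dev/b2r2_blt and read back one struct b2r2_blt_report per finished job:
+ *
+ *	struct b2r2_blt_report report;
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		read(fd, &report, sizeof(report));
+ */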
+
+/**
+ * b2r2_blt_fops - File operations for b2r2_blt
+ */
+static const struct file_operations b2r2_blt_fops = {
+ .owner = THIS_MODULE,
+ .open = b2r2_blt_open,
+ .release = b2r2_blt_release,
+ .unlocked_ioctl = b2r2_blt_ioctl,
+ .poll = b2r2_blt_poll,
+ .read = b2r2_blt_read,
+};
+
+/**
+ * b2r2_blt_misc_dev - Misc device config for b2r2_blt
+ */
+static struct miscdevice b2r2_blt_misc_dev = {
+ MISC_DYNAMIC_MINOR,
+ "b2r2_blt",
+ &b2r2_blt_fops
+};
+
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+/**
+ * b2r2_blt - Implementation of the B2R2 blit request
+ *
+ * @instance: The B2R2 BLT instance
+ * @request: The request to perform
+ */
+static int b2r2_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+
+ u32 thread_runtime_at_start = 0;
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ b2r2_log_info("%s\n", __func__);
+
+ inc_stat(&stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(
+ "src.fmt=%#010x src.buf={%d,%d,%d} "
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ b2r2_log_info(
+ "dst.fmt=%#010x dst.buf={%d,%d,%d} "
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+
+ inc_stat(&stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(
+ "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(&stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(&stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(&request->user_req.src_img,
+ &request->user_req.src_rect, false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(&request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Resolve src mask buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(&request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info("src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ b2r2_log_info("dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE,
+ &node_count, &request->bufs, &request->buf_count,
+ &request->node_split_job);
+ if (ret == -ENOSYS) {
+ /* There was no optimized path for this request */
+ b2r2_log_info(
+ "%s: No optimized path for request\n", __func__);
+ goto no_optimized_path;
+
+ } else if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Failed to analyze request, ret = %d\n",
+ __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info(
+ "%s: Analyze failed for:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info("%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info("Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Build the B2R2 node list */
+ ret = b2r2_node_split_configure(&request->node_split_job,
+ request->first_node);
+
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Failed to perform node split, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+
+ /* Exit here if dry run */
+ if (request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /* Configure the request */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback;
+ request->job.release = job_release;
+ request->job.acquire_resources = job_acquire_resources;
+ request->job.release_resources = job_release_resources;
+
+ /* Synchronize memory occupied by the buffers */
+
+ /* Source buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(&request->user_req.src_img,
+ &request->src_resolved,
+ false, /*is_dst*/
+ &request->user_req.src_rect);
+
+ /* Source mask buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(&request->user_req.src_mask,
+ &request->src_mask_resolved,
+ false, /*is_dst*/
+ NULL);
+
+ /* Destination buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(&request->user_req.dst_img,
+ &request->dst_resolved,
+ true, /*is_dst*/
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request for debugfs */
+ debugfs_latest_request = *request;
+#endif
+
+ /* Submit the job */
+ b2r2_log_info("%s: Submitting job\n", __func__);
+
+ inc_stat(&stat_n_in_blt_add);
+
+ if (request->profile)
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+
+ mutex_lock(&instance->lock);
+
+ /* Add the job to b2r2_core */
+ request_id = b2r2_core_job_add(&request->job);
+ request->request_id = request_id;
+
+ dec_stat(&stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn("%s: Failed to add job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(&stat_n_jobs_added);
+
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done if synchronous */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) {
+ b2r2_log_info("%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(&stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(&request->job);
+
+ dec_stat(&stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else
+ b2r2_log_info(
+ "%s: Synchronous wait done\n", __func__);
+ ret = 0;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add,
+ * the request must not be accessed after this call
+ */
+ b2r2_core_job_release(&request->job, __func__);
+
+ dec_stat(&stat_n_in_blt);
+
+ return ret >= 0 ? request_id : ret;
+
+job_add_failed:
+exit_dry_run:
+no_optimized_path:
+generate_nodes_failed:
+ unresolve_buf(&request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(&request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ unresolve_buf(&request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+ job_release(&request->job);
+ dec_stat(&stat_n_jobs_released);
+ if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret)
+ b2r2_log_warn(
+ "%s returns with error %d\n", __func__, ret);
+
+ dec_stat(&stat_n_in_blt);
+
+ return ret;
+}
+
+/**
+ * Called when job is done or cancelled
+ *
+ * @job: The job
+ */
+static void job_callback(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+
+ if (b2r2_blt_device())
+ b2r2_log_info("%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(&request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(&request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(&request->user_req.dst_img.buf,
+ &request->dst_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ mutex_lock(&request->instance->lock);
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(&stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /* Add a reference because we put the job in the report list */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info("%s: Job cancelled:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info("%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info("Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ if (request->profile) {
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() - request->start_time_nsec);
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+
+ inc_stat(&stat_n_jobs_released);
+
+ b2r2_log_info("%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ b2r2_node_split_cancel(&request->node_split_job);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(request->first_node);
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(request->first_node);
+#else
+ b2r2_node_free(request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(b2r2_blt_device(), CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+/**
+ * Tells the job to try to allocate the resources needed to execute the job.
+ * Called just before execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. If function
+ * can't allocate in atomic context it should return error, it
+ * will then be called later from non-atomic context.
+ */
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ int ret;
+ int i;
+
+ b2r2_log_info("%s\n", __func__);
+
+ if (request->buf_count == 0)
+ return 0;
+
+ if (request->buf_count > MAX_TMP_BUFS_NEEDED) {
+ b2r2_log_err("%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n",
+ __func__);
+ return -ENOMSG;
+ }
+
+ /*
+ * 1 to 1 mapping between request temp buffers and temp buffers
+ * (request temp buf 0 is always temp buf 0, request temp buf 1 is
+ * always temp buf 1 and so on) to avoid starvation of jobs that
+ * require multiple temp buffers. Not optimal in terms of memory
+ * usage, but we avoid getting into a situation where lower prio jobs
+ * can delay higher prio jobs that require more temp buffers.
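+ *
+ * Example (illustrative): a single-buffer job always uses temp buf 0,
+ * so it can never sit on temp buf 1 alone and indefinitely block a
+ * job that needs both buffers.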
+ */
+ if (tmp_bufs[0].in_use)
+ return -EAGAIN;
+
+ for (i = 0; i < request->buf_count; i++) {
+ if (tmp_bufs[i].buf.size < request->bufs[i].size) {
+ b2r2_log_err("%s: tmp_bufs[i].buf.size < "
+ "request->bufs[i].size\n",
+ __func__);
+ ret = -ENOMSG;
+ goto error;
+ }
+
+ tmp_bufs[i].in_use = true;
+ request->bufs[i].phys_addr = tmp_bufs[i].buf.phys_addr;
+ request->bufs[i].virt_addr = tmp_bufs[i].buf.virt_addr;
+
+ b2r2_log_info("%s: phys=%p, virt=%p\n",
+ __func__, (void *)request->bufs[i].phys_addr,
+ request->bufs[i].virt_addr);
+
+ ret = b2r2_node_split_assign_buffers(&request->node_split_job,
+ request->first_node, request->bufs,
+ request->buf_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < request->buf_count; i++)
+ tmp_bufs[i].in_use = false;
+
+ return ret;
+}
+
+/**
+ * Tells the job to free the resources needed to execute the job.
+ * Called after execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. Nodes are
+ * only freed when this is called from non-atomic context.
+ */
+static void job_release_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ int i;
+
+ b2r2_log_info("%s\n", __func__);
+
+ /* Free any temporary buffers */
+ for (i = 0; i < request->buf_count; i++) {
+
+ b2r2_log_info("%s: freeing %d bytes\n",
+ __func__, request->bufs[i].size);
+ tmp_bufs[i].in_use = false;
+ memset(&request->bufs[i], 0, sizeof(request->bufs[i]));
+ }
+ request->buf_count = 0;
+
+ /*
+ * Early release of nodes
+ * FIXME: If nodes are to be reused we don't want to release here
+ */
+ if (!atomic && request->first_node) {
+ b2r2_debug_job_done(request->first_node);
+
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(request->first_node);
+#else
+ b2r2_node_free(request->first_node);
+#endif
+ request->first_node = NULL;
+ }
+}
+
+#endif /* !CONFIG_B2R2_GENERIC_ONLY */
+
+#ifdef CONFIG_B2R2_GENERIC
+/**
+ * Called when job for one tile is done or cancelled
+ * in the generic path.
+ *
+ * @job: The job
+ */
+static void tile_job_callback_gen(struct b2r2_core_job *job)
+{
+ if (b2r2_blt_device())
+ b2r2_log_info("%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Notify if a tile job is cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED)
+ b2r2_log_info("%s: Tile job cancelled:\n", __func__);
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job is done or cancelled.
+ * Used for the last tile in the generic path
+ * to notify waiting clients.
+ *
+ * @job: The job
+ */
+static void job_callback_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+
+ if (b2r2_blt_device())
+ b2r2_log_info("%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(&request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(&request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(&request->user_req.dst_img.buf,
+ &request->dst_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ mutex_lock(&request->instance->lock);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(&stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /*
+ * Add a reference because we put the
+ * job in the report list
+ */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease the number of active requests and wake up
+ * synching threads if it reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info("%s: Job cancelled:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info("%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info("Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when tile job should be released (free memory etc.)
+ * Should be used only for tile jobs. Tile jobs should only be used
+ * by b2r2_core, thus making ref_count trigger their release.
+ *
+ * @job: The job
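+ *
+ * Note that only the tile job struct itself is freed here; the B2R2
+ * nodes belong to the request and are released by job_release_gen().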
+ */
+static void tile_job_release_gen(struct b2r2_core_job *job)
+{
+ inc_stat(&stat_n_jobs_released);
+
+ b2r2_log_info("%s, first_node_address=0x%.8x, ref_count=%d\n",
+ __func__, job->first_node_address, job->ref_count);
+
+ /* Release memory for the job */
+ kfree(job);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+
+ inc_stat(&stat_n_jobs_released);
+
+ b2r2_log_info("%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(request->first_node);
+
+ /* Free nodes */
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(request->first_node);
+#else
+ b2r2_node_free(request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(b2r2_blt_device(), CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+ return 0;
+}
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+}
+
+/**
+ * b2r2_generic_blt - Generic implementation of the B2R2 blit request
+ *
+ * @instance: The B2R2 BLT instance
+ * @request: The request to perform
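+ *
+ * Returns a request id (>= 0) when the blit has been carried out,
+ * otherwise a negative error code.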
+ */
+static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ s32 tmp_buf_width = 0;
+ s32 tmp_buf_height = 0;
+ u32 tmp_buf_count = 0;
+ s32 x;
+ s32 y;
+ const struct b2r2_blt_rect *dst_rect = &(request->user_req.dst_rect);
+ const s32 dst_img_width = request->user_req.dst_img.width;
+ const s32 dst_img_height = request->user_req.dst_img.height;
+ const enum b2r2_blt_flag flags = request->user_req.flags;
+ /* Descriptors for the temporary buffers */
+ struct b2r2_work_buf work_bufs[4];
+ struct b2r2_blt_rect dst_rect_tile;
+ int i;
+
+ u32 thread_runtime_at_start = 0;
+ s32 nsec_active_in_b2r2 = 0;
+
+ /*
+ * Early exit if zero blt.
+ * dst_rect outside of dst_img or
+ * dst_clip_rect outside of dst_img.
+ */
+ if (dst_rect->x + dst_rect->width <= 0 ||
+ dst_rect->y + dst_rect->height <= 0 ||
+ dst_img_width <= dst_rect->x ||
+ dst_img_height <= dst_rect->y ||
+ ((flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0 &&
+ (dst_img_width <= request->user_req.dst_clip_rect.x ||
+ dst_img_height <= request->user_req.dst_clip_rect.y ||
+ request->user_req.dst_clip_rect.x +
+ request->user_req.dst_clip_rect.width <= 0 ||
+ request->user_req.dst_clip_rect.y +
+ request->user_req.dst_clip_rect.height <= 0))) {
+ goto zero_blt;
+ }
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ memset(work_bufs, 0, sizeof(work_bufs));
+
+ b2r2_log_info("%s\n", __func__);
+
+ inc_stat(&stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(
+ "src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.flags,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ b2r2_log_info(
+ "dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n"
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n"
+ "dst_clip_rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height,
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+
+ inc_stat(&stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(
+ "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(&stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(&stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(&request->user_req.src_img,
+ &request->user_req.src_rect, false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(&request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Resolve src mask buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(&request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info("src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ b2r2_log_info("dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_generic_analyze(request, &tmp_buf_width,
+ &tmp_buf_height, &tmp_buf_count, &node_count);
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Failed to analyze request, ret = %d\n",
+ __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info(
+ "%s: Analyze failed for:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info("%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info("Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Allocate the temporary buffers */
+ for (i = 0; i < tmp_buf_count; i++) {
+ void *virt;
+ work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4;
+
+ virt = dma_alloc_coherent(b2r2_blt_device(),
+ work_bufs[i].size,
+ &(work_bufs[i].phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (virt == NULL) {
+ ret = -ENOMEM;
+ goto alloc_work_bufs_failed;
+ }
+
+ work_bufs[i].virt_addr = virt;
+ memset(work_bufs[i].virt_addr, 0xff, work_bufs[i].size);
+ }
+ ret = b2r2_generic_configure(request,
+ request->first_node, &work_bufs[0], tmp_buf_count);
+
+ if (ret < 0) {
+ b2r2_log_warn(
+ "%s: Failed to perform generic configure, ret = %d\n",
+ __func__, ret);
+ goto generic_conf_failed;
+ }
+
+ /* Exit here if dry run */
+ if (flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /*
+ * Configure the request and make sure
+ * that its job is run only for the LAST tile.
+ * This is when the request is complete
+ * and waiting clients should be notified.
+ */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback_gen;
+ request->job.release = job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ request->job.acquire_resources = job_acquire_resources_gen;
+ request->job.release_resources = job_release_resources_gen;
+
+ /* Flush the L1/L2 cache for the buffers */
+
+ /* Source buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(&request->user_req.src_img,
+ &request->src_resolved,
+ false, /*is_dst*/
+ &request->user_req.src_rect);
+
+ /* Source mask buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(&request->user_req.src_mask,
+ &request->src_mask_resolved,
+ false, /*is_dst*/
+ NULL);
+
+ /* Destination buffer */
+ if (!(flags & B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(&request->user_req.dst_img,
+ &request->dst_resolved,
+ true, /*is_dst*/
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request */
+ debugfs_latest_request = *request;
+#endif
+
+ /*
+ * The same nodes are reused for all the jobs needed to complete the blit.
+ * Nodes are NOT released together with the associated job,
+ * as is the case in the optimized b2r2_blt() path.
+ */
+ mutex_lock(&instance->lock);
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+ /*
+ * Process all but the last row in the destination rectangle.
+ * Consider only the tiles that will actually end up inside
+ * the destination image.
+ * dst_rect->height - tmp_buf_height being <= 0 is all right.
+ * The loop will not be entered since y will always be equal to or
+ * greater than zero.
+ * Early exit check at the beginning handles the cases when nothing
+ * at all should be processed.
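+ *
+ * Illustrative example (assuming dst_rect starts at (0,0) well inside
+ * dst_img): with a 300x300 dst_rect and 128x128 temp buffers, the rows
+ * y = 0 and y = 128 are handled by this loop and the last row,
+ * y = 256, by the loop that follows.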
+ */
+ y = 0;
+ if (dst_rect->y < 0)
+ y = -dst_rect->y;
+
+ for (; y < dst_rect->height - tmp_buf_height &&
+ y + dst_rect->y < dst_img_height - tmp_buf_height;
+ y += tmp_buf_height) {
+ /* Tile in the destination rectangle being processed */
+ struct b2r2_blt_rect dst_rect_tile;
+ dst_rect_tile.y = y;
+ dst_rect_tile.width = tmp_buf_width;
+ dst_rect_tile.height = tmp_buf_height;
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width &&
+ x + dst_rect->x < dst_img_width;
+ x += tmp_buf_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ */
+ struct b2r2_core_job *tile_job =
+ kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ /*
+ * Skip this tile. Do not abort,
+ * just hope for better luck
+ * with rest of the tiles.
+ * Memory might become available.
+ */
+ b2r2_log_info("%s: Failed to alloc job. "
+ "Skipping tile at (x, y)=(%d, %d)\n",
+ __func__, x, y);
+ continue;
+ }
+ tile_job->tag = request->job.tag;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ tile_job->acquire_resources = job_acquire_resources_gen;
+ tile_job->release_resources = job_release_resources_gen;
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width =
+ dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * Where applicable, calculate area in src buffer
+ * that is needed to generate the specified part
+ * of destination rectangle.
+ */
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+ /* Submit the job */
+ b2r2_log_info("%s: Submitting job\n", __func__);
+
+ inc_stat(&stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+
+ request_id = b2r2_core_job_add(tile_job);
+
+ dec_stat(&stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn("%s: "
+ "Failed to add tile job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(&stat_n_jobs_added);
+
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done */
+ b2r2_log_info("%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(&stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(tile_job);
+
+ dec_stat(&stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(
+ "%s: Synchronous wait done\n",
+ __func__);
+
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ }
+ /* Release matching the addref in b2r2_core_job_add */
+ b2r2_core_job_release(tile_job, __func__);
+ }
+ }
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width &&
+ x + dst_rect->x < dst_img_width; x += tmp_buf_width) {
+ struct b2r2_core_job *tile_job = NULL;
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ * Do NOT allocate a tile_job for the last tile.
+ * Send the job from the request. This way clients
+ * will be notified when the whole blit is complete
+ * and not just part of it.
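+ *
+ * The "last tile" test (x + tmp_buf_width reaching past dst_rect or
+ * dst_img) is repeated below when the job is added, waited for and
+ * released, so the request job and the tile jobs are always handled
+ * consistently.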
+ */
+ tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ b2r2_log_info("%s: Failed to alloc job. "
+ "Skipping tile at (x, y)=(%d, %d)\n",
+ __func__, x, y);
+ continue;
+ }
+ tile_job->tag = request->job.tag;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ tile_job->acquire_resources = job_acquire_resources_gen;
+ tile_job->release_resources = job_release_resources_gen;
+ }
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width = dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * y is now the last row. Either because the whole dst_rect
+ * has been processed, or because the last row that will be
+ * written to dst_img has been reached. Limits imposed in
+ * the same way as for width.
+ */
+ dst_rect_tile.y = y;
+ if (y + dst_rect->y + tmp_buf_height > dst_img_height)
+ dst_rect_tile.height =
+ dst_img_height - (y + dst_rect->y);
+ else if (y + tmp_buf_height > dst_rect->height)
+ dst_rect_tile.height = dst_rect->height - y;
+ else
+ dst_rect_tile.height = tmp_buf_height;
+
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+
+ b2r2_log_info("%s: Submitting job\n", __func__);
+ inc_stat(&stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ request_id = b2r2_core_job_add(tile_job);
+ } else {
+ /*
+ * Last tile. Send the job-struct from the request.
+ * Clients will be notified once it completes.
+ */
+ request_id = b2r2_core_job_add(&request->job);
+ }
+
+ dec_stat(&stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn("%s: Failed to add tile job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ if (tile_job != NULL)
+ kfree(tile_job);
+ goto job_add_failed;
+ }
+
+ inc_stat(&stat_n_jobs_added);
+ mutex_unlock(&instance->lock);
+
+ b2r2_log_info("%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(&stat_n_in_blt_wait);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ ret = b2r2_core_job_wait(tile_job);
+ } else {
+ /*
+ * This is the last tile. Wait for the job-struct from
+ * the request.
+ */
+ ret = b2r2_core_job_wait(&request->job);
+ }
+ dec_stat(&stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(
+ "%s: Synchronous wait done\n", __func__);
+
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width)
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ else
+ nsec_active_in_b2r2 +=
+ request->job.nsec_active_in_hw;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add.
+ * Make sure that the correct job-struct is released
+ * when the last tile is processed.
+ */
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ b2r2_core_job_release(tile_job, __func__);
+ } else {
+ /*
+ * Update profiling information before
+ * the request is released together with
+ * its core_job.
+ */
+ if (request->profile) {
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() -
+ request->start_time_nsec);
+ request->job.nsec_active_in_hw =
+ nsec_active_in_b2r2;
+
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ b2r2_core_job_release(&request->job, __func__);
+ }
+ }
+
+ dec_stat(&stat_n_in_blt);
+
+ for (i = 0; i < tmp_buf_count; i++) {
+ dma_free_coherent(b2r2_blt_device(),
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+
+ return request_id;
+
+job_add_failed:
+exit_dry_run:
+generic_conf_failed:
+alloc_work_bufs_failed:
+ for (i = 0; i < 4; i++) {
+ if (work_bufs[i].virt_addr != 0) {
+ dma_free_coherent(b2r2_blt_device(),
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+ }
+
+generate_nodes_failed:
+ unresolve_buf(&request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(&request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ unresolve_buf(&request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+zero_blt:
+ job_release_gen(&request->job);
+ dec_stat(&stat_n_jobs_released);
+ dec_stat(&stat_n_in_blt);
+
+ b2r2_log_info("b2r2:%s ret=%d\n", __func__, ret);
+ return ret;
+}
+#endif /* CONFIG_B2R2_GENERIC */
+
+/**
+ * b2r2_blt_synch - Implements wait for all or a specified job
+ *
+ * @instance: The B2R2 BLT instance
+ * @request_id: If 0, wait for all requests on this instance to finish.
+ * Else wait for request with given request id to finish.
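+ *
+ * Returns 0 when the wait completed (or the job could not be found and
+ * is assumed to have already run), otherwise a negative error code,
+ * e.g. if the wait was interrupted.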
+ */
+static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
+ int request_id)
+{
+ int ret = 0;
+ b2r2_log_info("%s, request_id=%d\n", __func__, request_id);
+
+ if (request_id == 0) {
+ /* Wait for all requests */
+ inc_stat(&stat_n_in_synch_0);
+
+ /* Enter state "synching" if we have any active request */
+ mutex_lock(&instance->lock);
+ if (instance->no_of_active_requests)
+ instance->synching = true;
+ mutex_unlock(&instance->lock);
+
+ /* Wait until no longer in state synching */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ dec_stat(&stat_n_in_synch_0);
+ } else {
+ struct b2r2_core_job *job;
+
+ inc_stat(&stat_n_in_synch_job);
+
+ /* Wait for specific job */
+ job = b2r2_core_job_find(request_id);
+ if (job) {
+ /* Wait on find job */
+ ret = b2r2_core_job_wait(job);
+ /* Release matching the addref in b2r2_core_job_find */
+ b2r2_core_job_release(job, __func__);
+ }
+
+ /* If the job was not found we assume that it has already been run */
+
+ dec_stat(&stat_n_in_synch_job);
+ }
+
+ b2r2_log_info(
+ "%s, request_id=%d, returns %d\n", __func__, request_id, ret);
+
+ return ret;
+}
+
+/**
+ * Query B2R2 capabilities
+ *
+ * @instance: The B2R2 BLT instance
+ * @query_cap: The structure receiving the capabilities
+ */
+static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_query_cap *query_cap)
+{
+ /* FIXME: Not implemented yet */
+ return -ENOSYS;
+}
+
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect)
+{
+ struct b2r2_blt_rect dst_img_bounds;
+
+ b2r2_get_img_bounding_rect(&req->dst_img, &dst_img_bounds);
+
+ b2r2_intersect_rects(&req->dst_rect, &dst_img_bounds, actual_dst_rect);
+
+ if (req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP)
+ b2r2_intersect_rects(actual_dst_rect, &req->dst_clip_rect,
+ actual_dst_rect);
+}
+
+static void set_up_hwmem_region(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect, struct hwmem_region *region)
+{
+ s32 img_size;
+
+ memset(region, 0, sizeof(*region));
+
+ if (b2r2_is_zero_area_rect(rect))
+ return;
+
+ img_size = b2r2_get_img_size(img);
+
+ if (b2r2_is_single_plane_fmt(img->fmt) &&
+ b2r2_is_independent_pixel_fmt(img->fmt)) {
+ int img_fmt_bpp = b2r2_get_fmt_bpp(img->fmt);
+ u32 img_pitch = b2r2_get_img_pitch(img);
+
+ region->offset = (u32)(img->buf.offset + (rect->y *
+ img_pitch));
+ region->count = (u32)rect->height;
+ region->start = (u32)((rect->x * img_fmt_bpp) / 8);
+ region->end = (u32)b2r2_div_round_up(
+ (rect->x + rect->width) * img_fmt_bpp, 8);
+ region->size = img_pitch;
+ } else {
+ /*
+ * TODO: Locking entire buffer as a quick safe solution. In the
+ * future we should lock less to avoid unnecessary cache
+ * synching. Pixel interleaved YCbCr formats should be quite
+ * easy, just align start and stop points on 2.
+ */
+ region->offset = (u32)img->buf.offset;
+ region->count = 1;
+ region->start = 0;
+ region->end = (u32)img_size;
+ region->size = (u32)img_size;
+ }
+}
+
+static int resolve_hwmem(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf)
+{
+ int return_value = 0;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+ enum hwmem_access required_access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region region;
+
+ resolved_buf->hwmem_alloc =
+ hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(resolved_buf->hwmem_alloc)) {
+ return_value = PTR_ERR(resolved_buf->hwmem_alloc);
+ b2r2_log_info("%s: hwmem_resolve_by_name failed, "
+ "error code: %i\n", __func__, return_value);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(resolved_buf->hwmem_alloc, &resolved_buf->file_len,
+ &mem_type, &access);
+
+ required_access = (is_dst ? HWMEM_ACCESS_WRITE : HWMEM_ACCESS_READ) |
+ HWMEM_ACCESS_IMPORT;
+ if ((required_access & access) != required_access) {
+ b2r2_log_info("%s: Insufficient access to hwmem buffer.\n",
+ __func__);
+ return_value = -EACCES;
+ goto access_check_failed;
+ }
+
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) {
+ b2r2_log_info("%s: Hwmem buffer is scattered.\n", __func__);
+ return_value = -EINVAL;
+ goto buf_scattered;
+ }
+
+ if (resolved_buf->file_len <
+ img->buf.offset + (__u32)b2r2_get_img_size(img)) {
+ b2r2_log_info("%s: Hwmem buffer too small.\n", __func__);
+ return_value = -EINVAL;
+ goto size_check_failed;
+ }
+
+ return_value = hwmem_pin(resolved_buf->hwmem_alloc, &mem_chunk,
+ &mem_chunk_length);
+ if (return_value < 0) {
+ b2r2_log_info("%s: hwmem_pin failed, "
+ "error code: %i\n", __func__, return_value);
+ goto pin_failed;
+ }
+ resolved_buf->file_physical_start = mem_chunk.paddr;
+
+ set_up_hwmem_region(img, rect_2b_used, &region);
+ return_value = hwmem_set_domain(resolved_buf->hwmem_alloc,
+ required_access, HWMEM_DOMAIN_SYNC, &region);
+ if (return_value < 0) {
+ b2r2_log_info("%s: hwmem_set_domain failed, "
+ "error code: %i\n", __func__, return_value);
+ goto set_domain_failed;
+ }
+
+ resolved_buf->physical_address =
+ resolved_buf->file_physical_start + img->buf.offset;
+
+ goto out;
+
+set_domain_failed:
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+pin_failed:
+size_check_failed:
+buf_scattered:
+access_check_failed:
+ hwmem_release(resolved_buf->hwmem_alloc);
+resolve_failed:
+
+out:
+ return return_value;
+}
+
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf)
+{
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+ hwmem_release(resolved_buf->hwmem_alloc);
+}
+
+/**
+ * unresolve_buf() - Must be called after resolve_buf
+ *
+ * @buf: The buffer specification as supplied from user space
+ * @resolved: Gathered information about the buffer
+ */
+static void unresolve_buf(struct b2r2_blt_buf *buf,
+ struct b2r2_resolved_buf *resolved)
+{
+#ifdef CONFIG_ANDROID_PMEM
+ if (resolved->is_pmem && resolved->filep)
+ put_pmem_file(resolved->filep);
+#endif
+ if (resolved->hwmem_alloc != NULL)
+ unresolve_hwmem(resolved);
+}
+
+/**
+ * resolve_buf() - Returns the physical & virtual addresses of a B2R2 blt buffer
+ *
+ * @img: The image specification as supplied from user space
+ * @rect_2b_used: The part of the image b2r2 will use.
+ * @is_dst: true if the buffer is used as a destination, false if used
+ * as a source.
+ * @resolved: Gathered information about the buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int resolve_buf(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved)
+{
+ int ret = 0;
+
+ memset(resolved, 0, sizeof(*resolved));
+
+ switch (img->buf.type) {
+ case B2R2_BLT_PTR_NONE:
+ break;
+
+ case B2R2_BLT_PTR_PHYSICAL:
+ resolved->physical_address = img->buf.offset;
+ resolved->file_len = img->buf.len;
+ break;
+
+ /* FD + OFFSET type */
+ case B2R2_BLT_PTR_FD_OFFSET: {
+ /*
+ * TODO: Do we need to check if the process is allowed to
+ * read/write (depending on if it's dst or src) to the file?
+ */
+ struct file *file;
+ int put_needed;
+ int i;
+
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(
+ img->buf.fd,
+ (unsigned long *) &resolved->file_physical_start,
+ (unsigned long *) &resolved->file_virtual_start,
+ (unsigned long *) &resolved->file_len,
+ &resolved->filep)) {
+ resolved->physical_address =
+ resolved->file_physical_start +
+ img->buf.offset;
+ resolved->virtual_address = (void *)
+ (resolved->file_virtual_start +
+ img->buf.offset);
+ resolved->is_pmem = true;
+ } else
+#endif
+ {
+ /* Will be set to 0 if a matching dev is found */
+ ret = -EINVAL;
+
+ file = fget_light(img->buf.fd, &put_needed);
+ if (file == NULL)
+ return -EINVAL;
+#ifdef CONFIG_FB
+ if (MAJOR(file->f_dentry->d_inode->i_rdev) ==
+ FB_MAJOR) {
+ /*
+ * This is a frame buffer device, find fb_info
+ * (OK to do it like this, no locking???)
+ */
+
+ for (i = 0; i < num_registered_fb; i++) {
+ struct fb_info *info = registered_fb[i];
+
+ if (info && info->dev &&
+ MINOR(info->dev->devt) ==
+ MINOR(file->f_dentry->
+ d_inode->i_rdev)) {
+ resolved->file_physical_start =
+ info->fix.smem_start;
+ resolved->file_virtual_start =
+ (u32)info->screen_base;
+ resolved->file_len =
+ info->fix.smem_len;
+
+ resolved->physical_address =
+ resolved->file_physical_start +
+ img->buf.offset;
+ resolved->virtual_address =
+ (void *) (resolved->
+ file_virtual_start +
+ img->buf.offset);
+
+ ret = 0;
+ break;
+ }
+ }
+ }
+#endif
+
+ fput_light(file, put_needed);
+ }
+
+ /* Check bounds */
+ if (ret >= 0 && img->buf.offset + img->buf.len >
+ resolved->file_len) {
+ ret = -ESPIPE;
+ unresolve_buf(&img->buf, resolved);
+ }
+
+ break;
+ }
+
+ case B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
+ ret = resolve_hwmem(img, rect_2b_used, is_dst, resolved);
+ break;
+
+ default:
+ b2r2_log_warn(
+ "%s: Failed to resolve buf type %d\n",
+ __func__, img->buf.type);
+
+ ret = -EINVAL;
+ break;
+
+ }
+
+ return ret;
+}
+
+/**
+ * sync_buf - Synchronizes the memory occupied by an image buffer.
+ *
+ * @img: User image specification
+ * @resolved: Gathered info (physical address etc.) about the buffer
+ * @is_dst: true if the buffer is a destination buffer, false if it is a
+ * source buffer
+ * @rect: Rectangle in the image buffer that should be synced.
+ * NULL if the buffer is a source mask.
+ */
+static void sync_buf(struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved,
+ bool is_dst,
+ struct b2r2_blt_rect *rect)
+{
+ struct sync_args sa;
+ u32 start_phys, end_phys;
+
+ if (B2R2_BLT_PTR_NONE == img->buf.type ||
+ B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type)
+ return;
+
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+
+ /*
+ * TODO: Very ugly. We should find out whether the memory is coherent in
+ * some generic way but cache handling will be rewritten soon so there
+ * is no use spending time on it. In the new design this will probably
+ * not be a problem.
+ */
+ /* Frame buffer is coherent, at least now. */
+ if (!resolved->is_pmem) {
+ /*
+ * Drain the write buffers as they are not always part of the
+ * coherent concept.
+ */
+ wmb();
+
+ return;
+ }
+
+ /*
+ * src_mask does not have rect.
+ * Also flush full buffer for planar and semiplanar YUV formats
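+ * (their chroma planes follow the luma plane in memory, so a
+ * rect-limited range computed over the luma plane would miss them)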
+ */
+ if (rect == NULL ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE)) {
+ sa.start = (unsigned long)resolved->virtual_address;
+ sa.end = (unsigned long)resolved->virtual_address +
+ img->buf.len;
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+ } else {
+ /*
+ * buffer is not a src_mask so make use of rect when
+ * clean & flush caches
+ */
+ u32 bpp; /* Bits per pixel */
+ u32 pitch;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ bpp = 16;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ bpp = 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ bpp = 32;
+ break;
+ default:
+ bpp = 12;
+ }
+ if (img->pitch == 0)
+ pitch = (img->width * bpp) / 8;
+ else
+ pitch = img->pitch;
+
+ /*
+ * For 422I formats 2 horizontal pixels share color data.
+ * Thus, the x position must be aligned down to closest even
+ * number and width must be aligned up.
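+ *
+ * Illustrative example: rect->x = 5 and rect->width = 3 become
+ * x = 4 and width = 4, so the pixel pairs containing pixels 5..7
+ * are fully covered.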
+ */
+ {
+ s32 x;
+ s32 width;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ x = (rect->x / 2) * 2;
+ width = ((rect->width + 1) / 2) * 2;
+ break;
+ default:
+ x = rect->x;
+ width = rect->width;
+ break;
+ }
+
+ sa.start = (unsigned long)resolved->virtual_address +
+ rect->y * pitch + (x * bpp) / 8;
+ sa.end = (unsigned long)sa.start +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+
+ start_phys = resolved->physical_address +
+ rect->y * pitch + (x * bpp) / 8;
+ end_phys = start_phys +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+ }
+ }
+
+ /*
+ * The virtual address to a pmem buffer is retrieved from ioremap, not
+ * sure if it's ok to use such an address as a kernel virtual address.
+ * When doing it at a higher level such as dma_map_single it triggers an
+ * error but at lower levels such as dmac_clean_range it seems to work,
+ * hence the low level stuff.
+ */
+
+ if (is_dst) {
+ /*
+ * According to ARM's docs you must clean before invalidating
+ * (i.e. flush) to avoid losing data.
+ */
+
+ /* Flush L1 cache */
+#ifdef CONFIG_SMP
+ flush_l1_cache_range_all_cpus(&sa);
+#else
+ flush_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Flush L2 cache */
+ outer_flush_range(start_phys, end_phys);
+ } else {
+ /* Clean L1 cache */
+#ifdef CONFIG_SMP
+ clean_l1_cache_range_all_cpus(&sa);
+#else
+ clean_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Clean L2 cache */
+ outer_clean_range(start_phys, end_phys);
+ }
+}
+
+/**
+ * is_report_list_empty() - Mutex protected check of report list
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_report_list_empty(struct b2r2_blt_instance *instance)
+{
+ bool is_empty;
+
+ mutex_lock(&instance->lock);
+ is_empty = list_empty(&instance->report_list);
+ mutex_unlock(&instance->lock);
+
+ return is_empty;
+}
+
+/**
+ * is_synching() - Mutex protected check if synching
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_synching(struct b2r2_blt_instance *instance)
+{
+ bool is_synching;
+
+ mutex_lock(&instance->lock);
+ is_synching = instance->synching;
+ mutex_unlock(&instance->lock);
+
+ return is_synching;
+}
+
+/**
+ * b2r2_blt_device() - Returns the B2R2 blt device for logging
+ */
+struct device *b2r2_blt_device(void)
+{
+ return b2r2_blt_dev ? b2r2_blt_dev->this_device : NULL;
+}
+
+/**
+ * inc_stat() - Mutex protected increment of statistics variable
+ *
+ * @stat: Pointer to statistics variable that should be incremented
+ */
+static void inc_stat(unsigned long *stat)
+{
+ mutex_lock(&stat_lock);
+ (*stat)++;
+ mutex_unlock(&stat_lock);
+}
+
+/**
+ * dec_stat() - Mutex protected decrement of statistics variable
+ *
+ * @stat: Pointer to statistics variable that should be decremented
+ */
+static void dec_stat(unsigned long *stat)
+{
+ mutex_lock(&stat_lock);
+ (*stat)--;
+ mutex_unlock(&stat_lock);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sprintf_req() - Builds a string representing the request, for debug
+ *
+ * @request:Request that should be encoded into a string
+ * @buf: Receiving buffer
+ * @size: Size of receiving buffer
+ *
+ * Returns number of characters in string, excluding null terminator
+ */
+static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
+{
+ size_t dev_size = 0;
+
+ dev_size += sprintf(buf + dev_size,
+ "instance: %p\n\n",
+ request->instance);
+
+ dev_size += sprintf(buf + dev_size,
+ "size: %d bytes\n",
+ request->user_req.size);
+ dev_size += sprintf(buf + dev_size,
+ "flags: %8lX\n",
+ (unsigned long) request->user_req.flags);
+ dev_size += sprintf(buf + dev_size,
+ "transform: %3lX\n",
+ (unsigned long) request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+ "prio: %d\n",
+ request->user_req.prio);
+ dev_size += sprintf(buf + dev_size,
+ "src_img.fmt: %#010x\n",
+ request->user_req.src_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_img.buf: {type=%d,hwmem_buf_name=%d,fd=%d,"
+ "offset=%d,len=%d}\n",
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.hwmem_buf_name,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_img.{width=%d,height=%d,pitch=%d}\n",
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.fmt: %#010x\n",
+ request->user_req.src_mask.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.buf: {type=%d,hwmem_buf_name=%d,fd=%d,"
+ "offset=%d,len=%d}\n",
+ request->user_req.src_mask.buf.type,
+ request->user_req.src_mask.buf.hwmem_buf_name,
+ request->user_req.src_mask.buf.fd,
+ request->user_req.src_mask.buf.offset,
+ request->user_req.src_mask.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.{width=%d,height=%d,pitch=%d}\n",
+ request->user_req.src_mask.width,
+ request->user_req.src_mask.height,
+ request->user_req.src_mask.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_rect.{x=%d,y=%d,width=%d,height=%d}\n",
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "src_color=%08lX\n",
+ (unsigned long) request->user_req.src_color);
+
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.fmt: %#010x\n",
+ request->user_req.dst_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.buf: {type=%d,hwmem_buf_name=%d,fd=%d,"
+ "offset=%d,len=%d}\n",
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.hwmem_buf_name,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.{width=%d,height=%d,pitch=%d}\n",
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "dst_rect.{x=%d,y=%d,width=%d,height=%d}\n",
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_clip_rect.{x=%d,y=%d,width=%d,height=%d}\n",
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_color=%08lX\n",
+ (unsigned long) request->user_req.dst_color);
+ dev_size += sprintf(buf + dev_size,
+ "global_alpha=%d\n",
+ (int) request->user_req.global_alpha);
+ dev_size += sprintf(buf + dev_size,
+ "report1=%08lX\n",
+ (unsigned long) request->user_req.report1);
+ dev_size += sprintf(buf + dev_size,
+ "report2=%08lX\n",
+ (unsigned long) request->user_req.report2);
+
+ dev_size += sprintf(buf + dev_size,
+ "request_id: %d\n",
+ request->request_id);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.physical: %lX\n",
+ (unsigned long) request->src_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.virtual: %p\n",
+ request->src_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep: %p\n",
+ request->src_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_physical_start: %lX\n",
+ (unsigned long) request->src_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_virtual_start: %p\n",
+ (void *) request->src_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.file_len: %d\n",
+ request->src_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.physical: %lX\n",
+ (unsigned long) request->src_mask_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.virtual: %p\n",
+ request->src_mask_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep: %p\n",
+ request->src_mask_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_physical_start: %lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_virtual_start: %p\n",
+ (void *) request->src_mask_resolved.
+ file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.file_len: %d\n",
+ request->src_mask_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.physical: %lX\n",
+ (unsigned long) request->dst_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.virtual: %p\n",
+ request->dst_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep: %p\n",
+ request->dst_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_physical_start: %lX\n",
+ (unsigned long) request->dst_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_virtual_start: %p\n",
+ (void *) request->dst_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.file_len: %d\n",
+ request->dst_resolved.file_len);
+
+ return dev_size;
+}
+
+/**
+ * debugfs_b2r2_blt_request_read() - Implements debugfs read for the
+ * latest B2R2 BLT request
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_request_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_size = sprintf_req(&debugfs_latest_request, Buf,
+ sizeof(char) * 4096);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count))
+ ret = -EINVAL;
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_request_fops - File operations for B2R2 request debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_request_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_request_read,
+};
+
+/**
+ * struct debugfs_reg - Represents a B2R2 node "register"
+ *
+ * @name: Register name
+ * @offset: Offset within the node
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_node_regs - Array with all the registers in a B2R2 node, for debug
+ */
+static const struct debugfs_reg debugfs_node_regs[] = {
+ {"GROUP0.B2R2_NIP", offsetof(struct b2r2_link_list, GROUP0.B2R2_NIP)},
+ {"GROUP0.B2R2_CIC", offsetof(struct b2r2_link_list, GROUP0.B2R2_CIC)},
+ {"GROUP0.B2R2_INS", offsetof(struct b2r2_link_list, GROUP0.B2R2_INS)},
+ {"GROUP0.B2R2_ACK", offsetof(struct b2r2_link_list, GROUP0.B2R2_ACK)},
+
+ {"GROUP1.B2R2_TBA", offsetof(struct b2r2_link_list, GROUP1.B2R2_TBA)},
+ {"GROUP1.B2R2_TTY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TTY)},
+ {"GROUP1.B2R2_TXY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TXY)},
+ {"GROUP1.B2R2_TSZ", offsetof(struct b2r2_link_list, GROUP1.B2R2_TSZ)},
+
+ {"GROUP2.B2R2_S1CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S1CF)},
+ {"GROUP2.B2R2_S2CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S2CF)},
+
+ {"GROUP3.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP3.B2R2_SBA)},
+ {"GROUP3.B2R2_STY", offsetof(struct b2r2_link_list, GROUP3.B2R2_STY)},
+ {"GROUP3.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP3.B2R2_SXY)},
+ {"GROUP3.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP3.B2R2_SSZ)},
+
+ {"GROUP4.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP4.B2R2_SBA)},
+ {"GROUP4.B2R2_STY", offsetof(struct b2r2_link_list, GROUP4.B2R2_STY)},
+ {"GROUP4.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP4.B2R2_SXY)},
+ {"GROUP4.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP4.B2R2_SSZ)},
+
+ {"GROUP5.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP5.B2R2_SBA)},
+ {"GROUP5.B2R2_STY", offsetof(struct b2r2_link_list, GROUP5.B2R2_STY)},
+ {"GROUP5.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP5.B2R2_SXY)},
+ {"GROUP5.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP5.B2R2_SSZ)},
+
+ {"GROUP6.B2R2_CWO", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWO)},
+ {"GROUP6.B2R2_CWS", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWS)},
+
+ {"GROUP7.B2R2_CCO", offsetof(struct b2r2_link_list, GROUP7.B2R2_CCO)},
+ {"GROUP7.B2R2_CML", offsetof(struct b2r2_link_list, GROUP7.B2R2_CML)},
+
+ {"GROUP8.B2R2_FCTL", offsetof(struct b2r2_link_list, GROUP8.B2R2_FCTL)},
+ {"GROUP8.B2R2_PMK", offsetof(struct b2r2_link_list, GROUP8.B2R2_PMK)},
+
+ {"GROUP9.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP9.B2R2_RSF)},
+ {"GROUP9.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP9.B2R2_RZI)},
+ {"GROUP9.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_HFP)},
+ {"GROUP9.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_VFP)},
+
+ {"GROUP10.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP10.B2R2_RSF)},
+ {"GROUP10.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP10.B2R2_RZI)},
+ {"GROUP10.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_HFP)},
+ {"GROUP10.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_VFP)},
+
+ {"GROUP11.B2R2_FF0", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF0)},
+ {"GROUP11.B2R2_FF1", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF1)},
+ {"GROUP11.B2R2_FF2", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF2)},
+ {"GROUP11.B2R2_FF3", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF3)},
+
+ {"GROUP12.B2R2_KEY1", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY1)},
+ {"GROUP12.B2R2_KEY2", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY2)},
+
+ {"GROUP13.B2R2_XYL", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYL)},
+ {"GROUP13.B2R2_XYP", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYP)},
+
+ {"GROUP14.B2R2_SAR", offsetof(struct b2r2_link_list, GROUP14.B2R2_SAR)},
+ {"GROUP14.B2R2_USR", offsetof(struct b2r2_link_list, GROUP14.B2R2_USR)},
+
+ {"GROUP15.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX0)},
+ {"GROUP15.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX1)},
+ {"GROUP15.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX2)},
+ {"GROUP15.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX3)},
+
+ {"GROUP16.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX0)},
+ {"GROUP16.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX1)},
+ {"GROUP16.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX2)},
+ {"GROUP16.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX3)},
+};
+
+/**
+ * debugfs_b2r2_blt_stat_read() - Implements debugfs read for B2R2 BLT
+ * statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&stat_lock);
+ dev_size += sprintf(Buf + dev_size, "Added jobs: %lu\n",
+ stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Released jobs: %lu\n",
+ stat_n_jobs_released);
+ dev_size += sprintf(Buf + dev_size, "Jobs in report list: %lu\n",
+ stat_n_jobs_in_report_list);
+ dev_size += sprintf(Buf + dev_size, "Clients in open: %lu\n",
+ stat_n_in_open);
+ dev_size += sprintf(Buf + dev_size, "Clients in release: %lu\n",
+ stat_n_in_release);
+ dev_size += sprintf(Buf + dev_size, "Clients in blt: %lu\n",
+ stat_n_in_blt);
+ dev_size += sprintf(Buf + dev_size, " synch: %lu\n",
+ stat_n_in_blt_synch);
+ dev_size += sprintf(Buf + dev_size, " add: %lu\n",
+ stat_n_in_blt_add);
+ dev_size += sprintf(Buf + dev_size, " wait: %lu\n",
+ stat_n_in_blt_wait);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch 0: %lu\n",
+ stat_n_in_synch_0);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch job: %lu\n",
+ stat_n_in_synch_job);
+ dev_size += sprintf(Buf + dev_size, "Clients in query_cap: %lu\n",
+ stat_n_in_query_cap);
+ mutex_unlock(&stat_lock);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count))
+ ret = -EINVAL;
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_stat_fops - File operations for B2R2 BLT
+ * statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_stat_read,
+};
+#endif
+
+static void init_tmp_bufs(void)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
+ tmp_bufs[i].buf.virt_addr = dma_alloc_coherent(
+ b2r2_blt_device(), MAX_TMP_BUF_SIZE,
+ &tmp_bufs[i].buf.phys_addr, GFP_DMA);
+ if (tmp_bufs[i].buf.virt_addr != NULL)
+ tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE;
+ else {
+ b2r2_log_err("%s: Failed to allocate temp buffer %i\n",
+ __func__, i);
+
+ tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+static void destroy_tmp_bufs(void)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
+ if (tmp_bufs[i].buf.size != 0) {
+ dma_free_coherent(b2r2_blt_device(),
+ tmp_bufs[i].buf.size,
+ tmp_bufs[i].buf.virt_addr,
+ tmp_bufs[i].buf.phys_addr);
+
+ tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+/**
+ * b2r2_blt_module_init() - Module init function
+ *
+ * Returns 0 if OK else negative error code
+ */
+int b2r2_blt_module_init(void)
+{
+ int ret;
+
+ mutex_init(&stat_lock);
+
+#ifdef CONFIG_B2R2_GENERIC
+ /* Initialize generic path */
+ b2r2_generic_init();
+#endif
+
+ /* Initialize node splitter */
+ ret = b2r2_node_split_init();
+ if (ret) {
+ printk(KERN_WARNING "%s: node split init fails\n",
+ __func__);
+ goto b2r2_node_split_init_fail;
+ }
+
+ /* Register b2r2 driver */
+ ret = misc_register(&b2r2_blt_misc_dev);
+ if (ret) {
+ printk(KERN_WARNING "%s: registering misc device fails\n",
+ __func__);
+ goto b2r2_misc_register_fail;
+ }
+
+ b2r2_blt_misc_dev.this_device->coherent_dma_mask = 0xFFFFFFFF;
+ b2r2_blt_dev = &b2r2_blt_misc_dev;
+ b2r2_log_info("%s\n", __func__);
+
+ /*
+	 * FIXME: These steps should be done before the first request can
+	 * arrive, i.e. before misc_register(), but they need the device,
+	 * which is not available until after misc_register().
+ */
+ init_tmp_bufs();
+
+ /* Initialize memory allocator */
+ ret = b2r2_mem_init(b2r2_blt_device(), B2R2_HEAP_SIZE,
+ 4, sizeof(struct b2r2_node));
+ if (ret) {
+ printk(KERN_WARNING "%s: initializing B2R2 memhandler fails\n",
+ __func__);
+ goto b2r2_mem_init_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs */
+ if (!debugfs_root_dir) {
+ debugfs_root_dir = debugfs_create_dir("b2r2_blt", NULL);
+ debugfs_create_file("latest_request",
+ 0666, debugfs_root_dir,
+ 0,
+ &debugfs_b2r2_blt_request_fops);
+ debugfs_create_file("stat",
+ 0666, debugfs_root_dir,
+ 0,
+ &debugfs_b2r2_blt_stat_fops);
+ }
+#endif
+ goto out;
+
+b2r2_mem_init_fail:
+	destroy_tmp_bufs();
+	b2r2_blt_dev = NULL;
+	misc_deregister(&b2r2_blt_misc_dev);
+b2r2_misc_register_fail:
+	b2r2_node_split_exit();
+
+b2r2_node_split_init_fail:
+#ifdef CONFIG_B2R2_GENERIC
+ b2r2_generic_exit();
+#endif
+out:
+ return ret;
+}
+
+/**
+ * b2r2_blt_module_exit() - Module exit function
+ */
+void b2r2_blt_module_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ if (debugfs_root_dir) {
+ debugfs_remove_recursive(debugfs_root_dir);
+ debugfs_root_dir = NULL;
+ }
+#endif
+ if (b2r2_blt_dev) {
+ b2r2_log_info("%s\n", __func__);
+ b2r2_mem_exit();
+ destroy_tmp_bufs();
+ b2r2_blt_dev = NULL;
+ misc_deregister(&b2r2_blt_misc_dev);
+ }
+
+ b2r2_node_split_exit();
+
+#if defined(CONFIG_B2R2_GENERIC)
+ b2r2_generic_exit();
+#endif
+}
+
+MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c
new file mode 100644
index 00000000000..7a11a301d11
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.c
@@ -0,0 +1,2666 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * TODO: Clock address from platform data
+ * Platform data should have string id instead of numbers
+ *       b2r2_remove: some kind of runtime problem when kernel hacking
+ *       debug features are enabled
+ *
+ * Is there already a priority list in kernel?
+ * Is it possible to handle clock using clock framework?
+ * uTimeOut, use mdelay instead?
+ * Measure performance
+ *
+ * Exchange our home-cooked ref count with kernel kref? See
+ * http://lwn.net/Articles/336224/
+ *
+ * B2R2:
+ * Source fill 2 bug
+ * Check with Symbian?
+ */
+
+/* include file */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "b2r2_core.h"
+#include "b2r2_global.h"
+#include "b2r2_structures.h"
+#include "b2r2_internal.h"
+#include "b2r2_profiler_api.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+
+/**
+ * B2R2_DRIVER_TIMEOUT_VALUE - Busy loop timeout after soft reset
+ */
+#define B2R2_DRIVER_TIMEOUT_VALUE (1500)
+
+/**
+ * B2R2_CLK_FLAG - Value to write into clock reg to turn clock on
+ */
+#define B2R2_CLK_FLAG (0x125)
+
+/**
+ * DEBUG_CHECK_ADDREF_RELEASE - Define this to enable addref / release debug
+ */
+#define DEBUG_CHECK_ADDREF_RELEASE 1
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * HANDLE_TIMEOUTED_JOBS - Define this to check jobs for timeout and cancel them
+ */
+#define HANDLE_TIMEOUTED_JOBS 1
+#endif
+
+/**
+ * B2R2_CLOCK_ALWAYS_ON - Define this to disable power save clock turn off
+ */
+/* #define B2R2_CLOCK_ALWAYS_ON 1 */
+
+/**
+ * START_SENTINEL - Watch guard to detect job overwrites
+ */
+#define START_SENTINEL 0xBABEDEEA
+
+/**
+ * END_SENTINEL - Watch guard to detect job overwrites
+ */
+#define END_SENTINEL 0xDADBDCDD
+
+/**
+ * B2R2_CORE_LOWEST_PRIO - Lowest prio allowed
+ */
+#define B2R2_CORE_LOWEST_PRIO -19
+/**
+ * B2R2_CORE_HIGHEST_PRIO - Highest prio allowed
+ */
+#define B2R2_CORE_HIGHEST_PRIO 20
+
+
+/**
+ * B2R2 Hardware defines below
+ */
+
+/* - BLT_AQ_CTL */
+#define B2R2_AQ_Enab (0x80000000)
+#define B2R2_AQ_PRIOR_0 (0x0)
+#define B2R2_AQ_PRIOR_1 (0x1)
+#define B2R2_AQ_PRIOR_2 (0x2)
+#define B2R2_AQ_PRIOR_3 (0x3)
+#define B2R2_AQ_NODE_REPEAT_INT (0x100000)
+#define B2R2_AQ_STOP_INT (0x200000)
+#define B2R2_AQ_LNA_REACH_INT (0x400000)
+#define B2R2_AQ_COMPLETED_INT (0x800000)
+
+/* - BLT_CTL */
+#define B2R2BLT_CTLGLOBAL_soft_reset (0x80000000)
+#define B2R2BLT_CTLStep_By_Step (0x20000000)
+#define B2R2BLT_CTLBig_not_little (0x10000000)
+#define B2R2BLT_CTLMask (0xb0000000)
+#define B2R2BLT_CTLTestMask (0xb0000000)
+#define B2R2BLT_CTLInitialValue (0x0)
+#define B2R2BLT_CTLAccessType (INITIAL_TEST)
+#define B2R2BLT_CTL (0xa00)
+
+/* - BLT_ITS */
+#define B2R2BLT_ITSRLD_ERROR (0x80000000)
+#define B2R2BLT_ITSAQ4_Node_Notif (0x8000000)
+#define B2R2BLT_ITSAQ4_Node_repeat (0x4000000)
+#define B2R2BLT_ITSAQ4_Stopped (0x2000000)
+#define B2R2BLT_ITSAQ4_LNA_Reached (0x1000000)
+#define B2R2BLT_ITSAQ3_Node_Notif (0x800000)
+#define B2R2BLT_ITSAQ3_Node_repeat (0x400000)
+#define B2R2BLT_ITSAQ3_Stopped (0x200000)
+#define B2R2BLT_ITSAQ3_LNA_Reached (0x100000)
+#define B2R2BLT_ITSAQ2_Node_Notif (0x80000)
+#define B2R2BLT_ITSAQ2_Node_repeat (0x40000)
+#define B2R2BLT_ITSAQ2_Stopped (0x20000)
+#define B2R2BLT_ITSAQ2_LNA_Reached (0x10000)
+#define B2R2BLT_ITSAQ1_Node_Notif (0x8000)
+#define B2R2BLT_ITSAQ1_Node_repeat (0x4000)
+#define B2R2BLT_ITSAQ1_Stopped (0x2000)
+#define B2R2BLT_ITSAQ1_LNA_Reached (0x1000)
+#define B2R2BLT_ITSCQ2_Repaced (0x80)
+#define B2R2BLT_ITSCQ2_Node_Notif (0x40)
+#define B2R2BLT_ITSCQ2_retriggered (0x20)
+#define B2R2BLT_ITSCQ2_completed (0x10)
+#define B2R2BLT_ITSCQ1_Repaced (0x8)
+#define B2R2BLT_ITSCQ1_Node_Notif (0x4)
+#define B2R2BLT_ITSCQ1_retriggered (0x2)
+#define B2R2BLT_ITSCQ1_completed (0x1)
+#define B2R2BLT_ITSMask (0x8ffff0ff)
+#define B2R2BLT_ITSTestMask (0x8ffff0ff)
+#define B2R2BLT_ITSInitialValue (0x0)
+#define B2R2BLT_ITSAccessType (INITIAL_TEST)
+#define B2R2BLT_ITS (0xa04)
+
+/* - BLT_STA1 */
+#define B2R2BLT_STA1BDISP_IDLE (0x1)
+#define B2R2BLT_STA1Mask (0x1)
+#define B2R2BLT_STA1TestMask (0x1)
+#define B2R2BLT_STA1InitialValue (0x1)
+#define B2R2BLT_STA1AccessType (INITIAL_TEST)
+#define B2R2BLT_STA1 (0xa08)
+
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+
+/**
+ * struct addref_release - Represents one addref or release. Used
+ * to debug addref / release problems
+ *
+ * @addref: true if this represents an addref else it represents
+ * a release.
+ * @job: The job that was referenced
+ * @caller: The caller of the addref or release
+ * @ref_count: The job reference count after addref / release
+ */
+struct addref_release {
+ bool addref;
+ struct b2r2_core_job *job;
+ const char *caller;
+ int ref_count;
+};
+
+#endif
+
+/**
+ * struct b2r2_core - Administration data for B2R2 core
+ *
+ * @lock: Spin lock protecting the b2r2_core structure and the B2R2 HW
+ * @hw: B2R2 registers memory mapped
+ * @pmu_b2r2_clock: Control of B2R2 clock
+ * @log_dev: Device used for logging via dev_... functions
+ *
+ * @prio_queue: Queue of jobs sorted in priority order
+ * @active_jobs: Array containing pointer to zero or one job per queue
+ * @n_active_jobs: Number of active jobs
+ * @jiffies_last_active: Jiffies value when the last active job was added
+ * @jiffies_last_irq: Jiffies value when the last irq occurred
+ * @timeout_work: Work structure for timeout work
+ *
+ * @next_job_id: Contains the job id that will be assigned to the next
+ * added job.
+ *
+ * @clock_request_count: When non-zero, clock is on
+ * @clock_off_timer: Kernel timer to handle delayed turn off of clock
+ *
+ * @work_queue: Work queue to handle done jobs (callbacks) and timeouts in
+ * non-interrupt context.
+ *
+ * @stat_n_irq: Number of interrupts (statistics)
+ * @stat_n_jobs_added: Number of jobs added (statistics)
+ * @stat_n_jobs_removed: Number of jobs removed (statistics)
+ * @stat_n_jobs_in_prio_list: Number of jobs in prio list (statistics)
+ *
+ * @debugfs_root_dir: Root directory for B2R2 debugfs
+ *
+ * @ar: Circular array of addref / release debug structs
+ * @ar_write: Where next write will occur
+ * @ar_read: First valid place to read. When ar_read == ar_write then
+ * the array is empty.
+ */
+struct b2r2_core {
+ spinlock_t lock;
+
+ struct b2r2_memory_map *hw;
+
+ u8 op_size;
+ u8 ch_size;
+ u8 pg_size;
+ u8 mg_size;
+ u16 min_req_time;
+ int irq;
+
+ struct device *log_dev;
+
+ struct list_head prio_queue;
+
+ struct b2r2_core_job *active_jobs[B2R2_CORE_QUEUE_NO_OF];
+ unsigned long n_active_jobs;
+
+ unsigned long jiffies_last_active;
+ unsigned long jiffies_last_irq;
+#ifdef HANDLE_TIMEOUTED_JOBS
+ struct delayed_work timeout_work;
+#endif
+ int next_job_id;
+
+ unsigned long clock_request_count;
+ struct timer_list clock_off_timer;
+
+ struct workqueue_struct *work_queue;
+
+ /* Statistics */
+ unsigned long stat_n_irq;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_removed;
+
+ unsigned long stat_n_jobs_in_prio_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_regs_dir;
+#endif
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Tracking release bug...*/
+ struct addref_release ar[100];
+ int ar_write;
+ int ar_read;
+#endif
+
+ /* Power management variables */
+ struct mutex domain_lock;
+ struct delayed_work domain_disable_work;
+
+ /*
+ * We need to keep track of both the number of domain_enable/disable()
+ * calls and whether the power was actually turned off, since the
+ * power off is done in a delayed job.
+ */
+ bool domain_enabled;
+ int domain_request_count;
+
+ struct clk *b2r2_clock;
+ struct regulator *b2r2_reg;
+};
+
+/**
+ * b2r2_core - Administration data for B2R2 core (singleton)
+ */
+static struct b2r2_core b2r2_core;
+
+/* Local functions */
+static void check_prio_list(bool atomic);
+static void clear_interrupts(void);
+static void trigger_job(struct b2r2_core_job *job);
+static void exit_job_list(struct list_head *job_list);
+static int get_next_job_id(void);
+static void job_work_function(struct work_struct *ptr);
+static void init_job(struct b2r2_core_job *job);
+static void insert_into_prio_list(struct b2r2_core_job *job);
+static struct b2r2_core_job *find_job_in_list(
+ int job_id,
+ struct list_head *list);
+static struct b2r2_core_job *find_job_in_active_jobs(int job_id);
+static struct b2r2_core_job *find_tag_in_list(
+ int tag,
+ struct list_head *list);
+static struct b2r2_core_job *find_tag_in_active_jobs(int tag);
+
+static int domain_enable(void);
+static void domain_disable(void);
+
+static void stop_queue(enum b2r2_core_queue queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+static void printk_regs(void);
+static int hw_reset(void);
+static void timeout_work_function(struct work_struct *ptr);
+#endif
+
+static void reset_hw_timer(struct b2r2_core_job *job);
+static void start_hw_timer(struct b2r2_core_job *job);
+static void stop_hw_timer(struct b2r2_core_job *job);
+
+static int init_hw(void);
+static void exit_hw(void);
+
+/* Tracking release bug... */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+/**
+ * ar_add() - Adds an addref or a release to the array
+ *
+ * @job: The job that has been referenced
+ * @caller: The caller of addref / release
+ * @addref: true if it is an addref else false for release
+ */
+void ar_add(struct b2r2_core_job *job, const char *caller, bool addref)
+{
+ b2r2_core.ar[b2r2_core.ar_write].addref = addref;
+ b2r2_core.ar[b2r2_core.ar_write].job = job;
+ b2r2_core.ar[b2r2_core.ar_write].caller = caller;
+ b2r2_core.ar[b2r2_core.ar_write].ref_count = job->ref_count;
+ b2r2_core.ar_write = (b2r2_core.ar_write + 1) %
+ ARRAY_SIZE(b2r2_core.ar);
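+	/* The array is full: advance the read index so the oldest entry is overwritten */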
+ if (b2r2_core.ar_write == b2r2_core.ar_read)
+ b2r2_core.ar_read = (b2r2_core.ar_read + 1) %
+ ARRAY_SIZE(b2r2_core.ar);
+}
+
+/**
+ * sprintf_ar() - Writes all addref / release to a string buffer
+ *
+ * @buf: Receiving character buffer
+ * @job: Which job to write or NULL for all
+ *
+ * NOTE! No buffer size check!!
+ */
+char *sprintf_ar(char *buf, struct b2r2_core_job *job)
+{
+ int i;
+ int size = 0;
+
+ for (i = b2r2_core.ar_read;
+ i != b2r2_core.ar_write;
+ i = (i + 1) % ARRAY_SIZE(b2r2_core.ar)) {
+ struct addref_release *ar = &b2r2_core.ar[i];
+ if (!job || job == ar->job)
+ size += sprintf(buf + size,
+ "%s on %p from %s, ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+
+ return buf;
+}
+
+/**
+ * printk_ar() - Writes all addref / release using dev_info
+ *
+ * @job: Which job to write or NULL for all
+ */
+void printk_ar(struct b2r2_core_job *job)
+{
+ int i;
+
+ for (i = b2r2_core.ar_read;
+ i != b2r2_core.ar_write;
+ i = (i + 1) % ARRAY_SIZE(b2r2_core.ar)) {
+ struct addref_release *ar = &b2r2_core.ar[i];
+ if (!job || job == ar->job)
+ b2r2_log_info("%s on %p from %s,"
+ " ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+}
+#endif
+
+/**
+ * internal_job_addref() - Increments the reference count for a job
+ *
+ * @job: Which job to increment reference count for
+ * @caller: Name of function calling addref (for debug)
+ *
+ * Note that b2r2_core.lock _must_ be held
+ */
+static void internal_job_addref(struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+
+ b2r2_log_info("%s (%p) (from %s)\n",
+ __func__, job, caller);
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(
+ "%s: (%p) start=%X end=%X ref_count=%d\n",
+ __func__, job, job->start_sentinel,
+ job->end_sentinel, job->ref_count);
+
+ /* Something is wrong, print the addref / release array */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(NULL);
+#endif
+ }
+
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+
+ /* Do the actual reference count increment */
+ ref_count = ++job->ref_count;
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Keep track of addref / release */
+ ar_add(job, caller, true);
+#endif
+
+ b2r2_log_info("%s called from %s (%p): Ref Count is %d\n",
+ __func__, caller, job, job->ref_count);
+}
+
+/**
+ * internal_job_release() - Decrements the reference count for a job
+ *
+ * @job: Which job to decrement reference count for
+ * @caller: Name of function calling release (for debug)
+ *
+ * Returns true if job_release should be called by caller
+ * (reference count reached zero).
+ *
+ * Note that b2r2_core.lock _must_ be held
+ */
+bool internal_job_release(struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+ bool call_release = false;
+
+ b2r2_log_info("%s (%p) (from %s)\n",
+ __func__, job, caller);
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(
+ "%s: (%p) start=%X end=%X ref_count=%d\n",
+ __func__, job, job->start_sentinel,
+ job->end_sentinel, job->ref_count);
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(NULL);
+#endif
+ }
+
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+
+ BUG_ON(job->ref_count == 0 || job->ref_count > 10);
+
+ /* Do the actual decrement */
+ ref_count = --job->ref_count;
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ ar_add(job, caller, false);
+#endif
+ b2r2_log_info("%s called from %s (%p) Ref Count is %d\n",
+ __func__, caller, job, ref_count);
+
+ if (!ref_count && job->release) {
+ call_release = true;
+ /* Job will now cease to exist */
+ job->start_sentinel = 0xFFFFFFFF;
+ job->end_sentinel = 0xFFFFFFFF;
+ }
+ return call_release;
+}
+
+
+
+/* Exported functions */
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ internal_job_addref(job, caller);
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+}
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ bool call_release = false;
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ call_release = internal_job_release(job, caller);
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ if (call_release)
+ job->release(job);
+}
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+int b2r2_core_job_add(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+
+ b2r2_log_info("%s (%p)\n", __func__, job);
+
+ /* Enable B2R2 */
+ domain_enable();
+
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ b2r2_core.stat_n_jobs_added++;
+
+ /* Initialise internal job data */
+ init_job(job);
+
+ /* Initial reference, should be released by caller of this function */
+ job->ref_count = 1;
+
+ /* Insert job into prio list */
+ insert_into_prio_list(job);
+
+ /* Check if we can dispatch job */
+ check_prio_list(false);
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ return 0;
+}
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+struct b2r2_core_job *b2r2_core_job_find(int job_id)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+
+ b2r2_log_info("%s (%d)\n", __func__, job_id);
+
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ /* Look through prio queue */
+ job = find_job_in_list(job_id, &b2r2_core.prio_queue);
+
+ if (!job)
+ job = find_job_in_active_jobs(job_id);
+
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ return job;
+}
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(int tag)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+
+ b2r2_log_info("%s (%d)\n", __func__, tag);
+
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ /* Look through prio queue */
+ job = find_tag_in_list(tag, &b2r2_core.prio_queue);
+
+ if (!job)
+ job = find_tag_in_active_jobs(tag);
+
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ return job;
+}
+
+/**
+ * is_job_done() - Spin lock protected check if job is done
+ *
+ * @job: Job to check
+ *
+ * Returns true if job is done or cancelled
+ *
+ * b2r2_core.lock must _NOT_ be held when calling this function
+ */
+static bool is_job_done(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ bool job_is_done;
+
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ job_is_done =
+ job->job_state != B2R2_CORE_JOB_QUEUED &&
+ job->job_state != B2R2_CORE_JOB_RUNNING;
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ return job_is_done;
+}
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+int b2r2_core_job_wait(struct b2r2_core_job *job)
+{
+ int ret = 0;
+
+ b2r2_log_info("%s (%p)\n", __func__, job);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Never or not queued */
+ b2r2_log_info("%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Wait for the job to be done */
+ ret = wait_event_interruptible(
+ job->event,
+ is_job_done(job));
+
+ if (ret)
+ b2r2_log_warn(
+ "%s: wait_event_interruptible returns %d, state is %d",
+ __func__, ret, job->job_state);
+ return ret;
+}
+
+/**
+ * cancel_job() - Cancels a job (removes it from prio list or active jobs) and
+ * calls the job callback
+ *
+ * @job: Job to cancel
+ *
+ * Returns true if the job was found and cancelled
+ *
+ * b2r2_core.lock must be held when calling this function
+ */
+static bool cancel_job(struct b2r2_core_job *job)
+{
+ bool found_job = false;
+ bool job_was_active = false;
+
+ /* Remove from prio list */
+ if (job->job_state == B2R2_CORE_JOB_QUEUED) {
+ list_del_init(&job->list);
+ found_job = true;
+ }
+
+ /* Remove from active jobs */
+ if (!found_job) {
+ if (b2r2_core.n_active_jobs > 0) {
+ int i;
+
+			/* Look for the job among the active jobs */
+ for (i = 0;
+ i < ARRAY_SIZE(b2r2_core.active_jobs);
+ i++) {
+ if (b2r2_core.active_jobs[i] == job) {
+ stop_queue((enum b2r2_core_queue)i);
+ stop_hw_timer(job);
+ b2r2_core.active_jobs[i] = NULL;
+ b2r2_core.n_active_jobs--;
+ found_job = true;
+ job_was_active = true;
+ }
+ }
+ }
+ }
+
+
+ /* Handle done list & callback */
+ if (found_job) {
+ /* Job is canceled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ queue_work(b2r2_core.work_queue, &job->work);
+
+ /* Statistics */
+ if (!job_was_active)
+ b2r2_core.stat_n_jobs_in_prio_list--;
+
+ }
+
+ return found_job;
+}
+
+/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+int b2r2_core_job_cancel(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ b2r2_log_info("%s (%p) (%d)\n", __func__,
+ job, job->job_state);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Never or not queued */
+ b2r2_log_info("%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Remove from prio list */
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ cancel_job(job);
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ return ret;
+}
+
+/* LOCAL FUNCTIONS BELOW */
+
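+/**
+ * B2R2_DOMAIN_DISABLE_TIMEOUT - Delay, in jiffies, before the B2R2 power
+ * domain and clock are actually turned off after the last domain_disable()
+ */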
+#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
+
+static void domain_disable_work_function(struct work_struct *work)
+{
+ if (!mutex_trylock(&b2r2_core.domain_lock))
+ return;
+
+ if (b2r2_core.domain_request_count == 0) {
+ exit_hw();
+ clk_disable(b2r2_core.b2r2_clock);
+ regulator_disable(b2r2_core.b2r2_reg);
+ b2r2_core.domain_enabled = false;
+ }
+
+ mutex_unlock(&b2r2_core.domain_lock);
+}
+
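+/**
+ * B2R2_REGULATOR_RETRY_COUNT - Maximum number of times regulator_enable()
+ * is retried when it returns -EAGAIN
+ */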
+#define B2R2_REGULATOR_RETRY_COUNT 10
+
+static int domain_enable(void)
+{
+ mutex_lock(&b2r2_core.domain_lock);
+ b2r2_core.domain_request_count++;
+
+ if (!b2r2_core.domain_enabled) {
+ int retry = 0;
+ int ret;
+
+again:
+ /*
+ * Since regulator_enable() may sleep we have to handle
+ * interrupts.
+ */
+ ret = regulator_enable(b2r2_core.b2r2_reg);
+ if ((ret == -EAGAIN) &&
+ ((retry++) < B2R2_REGULATOR_RETRY_COUNT))
+ goto again;
+ else if (ret < 0)
+ goto regulator_enable_failed;
+
+ clk_enable(b2r2_core.b2r2_clock);
+ if (init_hw() < 0)
+ goto init_hw_failed;
+ b2r2_core.domain_enabled = true;
+ }
+
+ mutex_unlock(&b2r2_core.domain_lock);
+
+ return 0;
+
+init_hw_failed:
+ b2r2_log_err("%s: Could not initialize hardware!\n", __func__);
+
+ clk_disable(b2r2_core.b2r2_clock);
+
+ if (regulator_disable(b2r2_core.b2r2_reg) < 0)
+ b2r2_log_err("%s: regulator_disable failed!\n", __func__);
+
+regulator_enable_failed:
+ b2r2_core.domain_request_count--;
+ mutex_unlock(&b2r2_core.domain_lock);
+
+ return -EFAULT;
+}
+
+static void domain_disable(void)
+{
+ mutex_lock(&b2r2_core.domain_lock);
+
+ if (b2r2_core.domain_request_count == 0) {
+ b2r2_log_err("%s: Unbalanced domain_disable()\n", __func__);
+ } else {
+ b2r2_core.domain_request_count--;
+
+ /* Cancel any existing work */
+ cancel_delayed_work_sync(&b2r2_core.domain_disable_work);
+
+ /* Add a work to disable the power and clock after a delay */
+ queue_delayed_work(b2r2_core.work_queue,
+ &b2r2_core.domain_disable_work,
+ B2R2_DOMAIN_DISABLE_TIMEOUT);
+ }
+
+ mutex_unlock(&b2r2_core.domain_lock);
+}
+
+/**
+ * stop_queue() - Stops the specified queue.
+ */
+static void stop_queue(enum b2r2_core_queue queue)
+{
+	/*
+	 * TODO: Implement! As long as this function is not implemented,
+	 * cancelled jobs still occupy B2R2, which is a waste of resources.
+	 * Not stopping jobs also skews the hardware timing: the job that the
+	 * cancelled job interrupted (if any) is billed for the time between
+	 * the point where the job is cancelled and the point where it
+	 * actually stops.
+	 */
+}
+
+/**
+ * exit_job_list() - Empties a job queue by canceling the jobs
+ *
+ * b2r2_core.lock _must_ be held when calling this function
+ */
+static void exit_job_list(struct list_head *job_queue)
+{
+ while (!list_empty(job_queue)) {
+ struct b2r2_core_job *job =
+ list_entry(job_queue->next,
+ struct b2r2_core_job,
+ list);
+ /* Add reference to prevent job from disappearing
+ in the middle of our work, released below */
+ internal_job_addref(job, __func__);
+
+ cancel_job(job);
+
+ /* Matching release to addref above */
+ internal_job_release(job, __func__);
+
+ }
+}
+
+/**
+ * get_next_job_id() - Return a new job id.
+ */
+static int get_next_job_id(void)
+{
+ int job_id;
+
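+	/* Valid job ids are always positive; restart at 1 on wrap-around */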
+ if (b2r2_core.next_job_id < 1)
+ b2r2_core.next_job_id = 1;
+ job_id = b2r2_core.next_job_id++;
+
+ return job_id;
+}
+
+/**
+ * job_work_function() - Work queue function that calls callback(s) and
+ * checks if B2R2 can accept a new job
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core_job)
+ */
+static void job_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job = container_of(
+ ptr, struct b2r2_core_job, work);
+
+ /* Disable B2R2 */
+ domain_disable();
+
+ /* Release resources */
+ if (job->release_resources)
+ job->release_resources(job, false);
+
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+
+ /* Dispatch a new job if possible */
+ check_prio_list(false);
+
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ /* Tell the client */
+ if (job->callback)
+ job->callback(job);
+
+ /* Drop our reference, matches the
+ addref in handle_queue_event or b2r2_core_job_cancel */
+ b2r2_core_job_release(job, __func__);
+}
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * timeout_work_function() - Work queue function that checks for
+ *                           timed-out jobs. B2R2 might silently refuse
+ *                           to execute some jobs, e.g. SRC2 fill
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core)
+ *
+ */
+static void timeout_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct list_head job_list;
+
+ INIT_LIST_HEAD(&job_list);
+
+ /* Cancel all jobs if too long time since last irq */
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ if (b2r2_core.n_active_jobs > 0) {
+ unsigned long diff =
+ (long) jiffies - (long) b2r2_core.jiffies_last_irq;
+ if (diff > HZ/2) {
+			/* Active jobs and more than half a second since the last irq! */
+ int i;
+
+			/* Look for timed-out jobs and put them in a tmp list. It's
+			   important that the application queues are killed in order
+			   of decreasing priority */
+ for (i = 0;
+ i < ARRAY_SIZE(b2r2_core.active_jobs);
+ i++) {
+ struct b2r2_core_job *job =
+ b2r2_core.active_jobs[i];
+
+ if (job) {
+ stop_hw_timer(job);
+
+ b2r2_core.active_jobs[i] = NULL;
+ b2r2_core.n_active_jobs--;
+ list_add_tail(&job->list,
+ &job_list);
+ }
+ }
+
+ /* Print the B2R2 register and reset B2R2 */
+ printk_regs();
+ hw_reset();
+ }
+ }
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ /* Handle timeout:ed jobs */
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ while (!list_empty(&job_list)) {
+ struct b2r2_core_job *job =
+ list_entry(job_list.next,
+ struct b2r2_core_job,
+ list);
+
+ b2r2_log_warn("%s: Job timeout\n", __func__);
+
+ list_del_init(&job->list);
+
+ /* Job is cancelled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Job callbacks handled via work queue */
+ queue_work(b2r2_core.work_queue, &job->work);
+ }
+
+ /* Requeue delayed work */
+ if (b2r2_core.n_active_jobs)
+ queue_delayed_work(
+ b2r2_core.work_queue,
+ &b2r2_core.timeout_work, HZ/2);
+
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+}
+#endif
+
+/**
+ * reset_hw_timer() - Resets a job's hardware timer. Must be called before
+ * the timer is used.
+ *
+ * @job: Pointer to job struct
+ *
+ * b2r2_core.lock _must_ be held when calling this function
+ */
+static void reset_hw_timer(struct b2r2_core_job *job)
+{
+ job->nsec_active_in_hw = 0;
+}
+
+/**
+ * start_hw_timer() - Times how long a job spends in hardware (active).
+ *                  Should be called immediately before starting the
+ * hardware.
+ *
+ * @job: Pointer to job struct
+ *
+ * b2r2_core.lock _must_ be held when calling this function
+ */
+static void start_hw_timer(struct b2r2_core_job *job)
+{
+ job->hw_start_time = b2r2_get_curr_nsec();
+}
+
+/**
+ * stop_hw_timer() - Times how long a job spends in hardware (active).
+ *                  Should be called immediately after the hardware has
+ * finished.
+ *
+ * @job: Pointer to job struct
+ *
+ * b2r2_core.lock _must_ be held when calling this function
+ */
+static void stop_hw_timer(struct b2r2_core_job *job)
+{
+	/*
+	 * Assumes that only the application queues are used, which is the
+	 * case right now.
+	 */
+
+	/*
+	 * Not 100% accurate. When a higher prio job interrupts a lower prio
+	 * job, it does so only after the current node of the low prio job
+	 * has finished. Currently we cannot sense when the actual switch
+	 * takes place, so the time reported for a job that interrupts a
+	 * lower prio job will on average also include the time it takes to
+	 * process half a node of the lower prio job, in addition to the
+	 * time it takes to process the job's own nodes. This could possibly
+	 * be solved by adding node notifications, but that would involve a
+	 * significant amount of work and consume system resources due to
+	 * the extra interrupts.
+	 */
+
+	/*
+	 * If a job takes more than ~2 s (absolute time, including idling in
+	 * the hardware), the state of the hardware timer will be corrupted
+	 * and it will not report valid values until B2R2 becomes idle (no
+	 * active jobs on any queue). The maximum length could possibly be
+	 * increased by using 64 bit integers.
+	 */
+
+ int i;
+
+ u32 stop_time_raw = b2r2_get_curr_nsec();
+	/*
+	 * We'll add an offset to all positions in time so that the current
+	 * time equals 0xFFFFFFFF. This way we can compare positions in time
+	 * to each other without having to worry about wrapping (as long as
+	 * all positions in time are in the past).
+	 */
+ u32 stop_time = 0xFFFFFFFF;
+ u32 time_pos_offset = 0xFFFFFFFF - stop_time_raw;
+ u32 nsec_in_hw = stop_time - (job->hw_start_time + time_pos_offset);
+ job->nsec_active_in_hw += (s32)nsec_in_hw;
+
+	/*
+	 * Check if we have delayed the start of higher prio jobs. This can
+	 * happen since queue switching can only be done between nodes.
+	 */
+ for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) {
+ struct b2r2_core_job *queue_active_job = b2r2_core.active_jobs[i];
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job->hw_start_time = stop_time_raw;
+ }
+
+ /* Check if the job has stolen time from lower prio jobs */
+ for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) {
+ struct b2r2_core_job *queue_active_job = b2r2_core.active_jobs[i];
+ u32 queue_active_job_hw_start_time;
+
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job_hw_start_time = queue_active_job->hw_start_time + time_pos_offset;
+
+ if (queue_active_job_hw_start_time < stop_time) {
+ u32 queue_active_job_nsec_in_hw = stop_time - queue_active_job_hw_start_time;
+ u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw, nsec_in_hw);
+
+ queue_active_job->nsec_active_in_hw -= (s32)num_stolen_nsec;
+
+ nsec_in_hw -= num_stolen_nsec;
+ stop_time -= num_stolen_nsec;
+ }
+
+ if (0 == nsec_in_hw)
+ break;
+ }
+}
+
+/**
+ * init_job() - Initializes a job structure from filled in client data.
+ *              The caller sets the initial reference count.
+ *
+ * @job: Job to initialize
+ */
+static void init_job(struct b2r2_core_job *job)
+{
+ job->start_sentinel = START_SENTINEL;
+ job->end_sentinel = END_SENTINEL;
+
+ /* Get a job id*/
+ job->job_id = get_next_job_id();
+
+ /* Job is idle, never queued */
+ job->job_state = B2R2_CORE_JOB_IDLE;
+
+ /* Initialize internal data */
+ INIT_LIST_HEAD(&job->list);
+ init_waitqueue_head(&job->event);
+ INIT_WORK(&job->work, job_work_function);
+
+ /* Map given prio to B2R2 queues */
+ if (job->prio < B2R2_CORE_LOWEST_PRIO)
+ job->prio = B2R2_CORE_LOWEST_PRIO;
+ else if (job->prio > B2R2_CORE_HIGHEST_PRIO)
+ job->prio = B2R2_CORE_HIGHEST_PRIO;
+
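+	/*
+	 * Clamped priority to queue: 11..20 -> AQ1, 1..10 -> AQ2,
+	 * -9..0 -> AQ3 and -19..-10 -> AQ4, each with a matching
+	 * hardware queue priority (PRIOR_3 down to PRIOR_0).
+	 */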
+ if (job->prio > 10) {
+ job->queue = B2R2_CORE_QUEUE_AQ1;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ1_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_3);
+ } else if (job->prio > 0) {
+ job->queue = B2R2_CORE_QUEUE_AQ2;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ2_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_2);
+ } else if (job->prio > -10) {
+ job->queue = B2R2_CORE_QUEUE_AQ3;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ3_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_1);
+ } else {
+ job->queue = B2R2_CORE_QUEUE_AQ4;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ4_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_0);
+ }
+}
+
+/**
+ * clear_interrupts() - Disables all interrupts
+ *
+ * b2r2_core.lock must be held
+ */
+static void clear_interrupts(void)
+{
+ writel(0x0, &b2r2_core.hw->BLT_ITM0);
+ writel(0x0, &b2r2_core.hw->BLT_ITM1);
+ writel(0x0, &b2r2_core.hw->BLT_ITM2);
+ writel(0x0, &b2r2_core.hw->BLT_ITM3);
+}
+
+/**
+ * insert_into_prio_list() - Inserts the job into the sorted list of jobs.
+ * The list is sorted by priority.
+ *
+ * @job: Job to insert
+ *
+ * b2r2_core.lock must be held
+ */
+static void insert_into_prio_list(struct b2r2_core_job *job)
+{
+ /* Ref count is increased when job put in list,
+ should be released when job is removed from list */
+ internal_job_addref(job, __func__);
+
+ b2r2_core.stat_n_jobs_in_prio_list++;
+
+ /* Sort in the job */
+ if (list_empty(&b2r2_core.prio_queue))
+ list_add_tail(&job->list, &b2r2_core.prio_queue);
+ else {
+ struct b2r2_core_job *first_job =
+ list_entry(b2r2_core.prio_queue.next,
+ struct b2r2_core_job, list);
+ struct b2r2_core_job *last_job =
+ list_entry(b2r2_core.prio_queue.prev,
+ struct b2r2_core_job, list);
+
+ /* High prio job? */
+ if (job->prio > first_job->prio)
+ /* Insert first */
+ list_add(&job->list, &b2r2_core.prio_queue);
+ else if (job->prio <= last_job->prio)
+ /* Insert last */
+ list_add_tail(&job->list, &b2r2_core.prio_queue);
+ else {
+ /* We need to find where to put it */
+ struct list_head *ptr;
+
+ list_for_each(ptr, &b2r2_core.prio_queue) {
+ struct b2r2_core_job *list_job =
+ list_entry(ptr, struct b2r2_core_job,
+ list);
+ if (job->prio > list_job->prio) {
+ /* Add before */
+ list_add_tail(&job->list,
+ &list_job->list);
+ break;
+ }
+ }
+ }
+ }
+ /* The job is now queued */
+ job->job_state = B2R2_CORE_JOB_QUEUED;
+}
+
+/**
+ * check_prio_list() - Checks if the first job(s) in the prio list can
+ * be dispatched to B2R2
+ *
+ * @atomic: true if in atomic context (i.e. interrupt context)
+ *
+ * b2r2_core.lock must be held
+ */
+static void check_prio_list(bool atomic)
+{
+ bool dispatched_job;
+ int n_dispatched = 0;
+
+ /* Do we have anything in our prio list? */
+ do {
+ dispatched_job = false;
+ if (!list_empty(&b2r2_core.prio_queue)) {
+ /* The first job waiting */
+ struct b2r2_core_job *job =
+ list_first_entry(&b2r2_core.prio_queue,
+ struct b2r2_core_job,
+ list);
+
+ /* Is the B2R2 queue available? */
+ if (b2r2_core.active_jobs[job->queue] == NULL) {
+ /* Can we acquire resources? */
+ if (!job->acquire_resources ||
+ job->acquire_resources(job, atomic) == 0) {
+ /* Ok to dispatch job */
+
+ /* Remove from list */
+ list_del_init(&job->list);
+
+ /* The job is now active */
+ b2r2_core.active_jobs[job->queue] = job;
+ b2r2_core.n_active_jobs++;
+ job->jiffies = jiffies;
+ b2r2_core.jiffies_last_active =
+ jiffies;
+
+ /* Kick off B2R2 */
+ trigger_job(job);
+
+ dispatched_job = true;
+ n_dispatched++;
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+					/* Check in half a second
+					   whether the job hangs */
+ queue_delayed_work(
+ b2r2_core.work_queue,
+ &b2r2_core.timeout_work,
+ HZ/2);
+#endif
+ } else {
+ /* No resources */
+ if (!atomic &&
+ b2r2_core.n_active_jobs == 0) {
+ b2r2_log_warn(
+ "%s: No resource",
+ __func__);
+ cancel_job(job);
+ }
+ }
+ }
+ }
+ } while (dispatched_job);
+
+ b2r2_core.stat_n_jobs_in_prio_list -= n_dispatched;
+}
+
+/**
+ * find_job_in_list() - Finds job with job_id in list
+ *
+ * @job_id: Job id to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * b2r2_core.lock must be held
+ */
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job =
+ list_entry(ptr, struct b2r2_core_job, list);
+ if (job->job_id == job_id) {
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_job_in_active_jobs() - Finds job in active job queues
+ *
+ * @job_id: Job id to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * b2r2_core.lock must be held
+ */
+static struct b2r2_core_job *find_job_in_active_jobs(int job_id)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (b2r2_core.n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(b2r2_core.active_jobs); i++) {
+ struct b2r2_core_job *job = b2r2_core.active_jobs[i];
+
+ if (job && job->job_id == job_id) {
+ internal_job_addref(job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+/**
+ * find_tag_in_list() - Finds first job with tag in list
+ *
+ * @tag: Tag to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * b2r2_core.lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_list(int tag, struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job =
+ list_entry(ptr, struct b2r2_core_job, list);
+ if (job->tag == tag) {
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_tag_in_active_jobs() - Finds job with tag in active job queues
+ *
+ * @tag: Tag to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * b2r2_core.lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_active_jobs(int tag)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (b2r2_core.n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(b2r2_core.active_jobs); i++) {
+ struct b2r2_core_job *job = b2r2_core.active_jobs[i];
+
+ if (job && job->tag == tag) {
+ internal_job_addref(job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * hw_reset() - Resets B2R2 hardware
+ *
+ * b2r2_core.lock must be held
+ */
+static int hw_reset(void)
+{
+ u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
+
+ /* Tell B2R2 to reset */
+ writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &b2r2_core.hw->BLT_CTL);
+ writel(0x00000000, &b2r2_core.hw->BLT_CTL);
+
+ b2r2_log_info("wait for B2R2 to be idle..\n");
+
+	/* Wait for B2R2 to become idle, bounded by a timeout */
+ while ((uTimeOut > 0) &&
+ ((readl(&b2r2_core.hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+
+ if (uTimeOut == 0) {
+ b2r2_log_warn(
+ "error-> after software reset B2R2 is not idle\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+
+}
+#endif
+
+/**
+ * trigger_job() - Put job in B2R2 HW queue
+ *
+ * @job: Job to trigger
+ *
+ * b2r2_core.lock must be held
+ */
+static void trigger_job(struct b2r2_core_job *job)
+{
+ /* Debug prints */
+ b2r2_log_info("queue 0x%x \n", job->queue);
+ b2r2_log_info("BLT TRIG_IP 0x%x (first node)\n",
+ job->first_node_address);
+ b2r2_log_info("BLT LNA_CTL 0x%x (last node)\n",
+ job->last_node_address);
+ b2r2_log_info("BLT TRIG_CTL 0x%x \n", job->control);
+ b2r2_log_info("BLT PACE_CTL 0x%x \n", job->pace_control);
+
+ reset_hw_timer(job);
+ job->job_state = B2R2_CORE_JOB_RUNNING;
+
+ /* Enable interrupt */
+ writel(readl(&b2r2_core.hw->BLT_ITM0) | job->interrupt_context,
+ &b2r2_core.hw->BLT_ITM0);
+
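+	/*
+	 * Program the source and target memory plug registers; the opcode,
+	 * chunk, message and page sizes from b2r2_core are clamped to the
+	 * ranges supported by the hardware.
+	 */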
+ writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64),
+ &b2r2_core.hw->PLUGS1_OP2);
+ writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &b2r2_core.hw->PLUGS1_CHZ);
+ writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (b2r2_core.min_req_time << 16),
+ &b2r2_core.hw->PLUGS1_MSZ);
+ writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &b2r2_core.hw->PLUGS1_PGZ);
+
+ writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64),
+ &b2r2_core.hw->PLUGS2_OP2);
+ writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &b2r2_core.hw->PLUGS2_CHZ);
+ writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (b2r2_core.min_req_time << 16),
+ &b2r2_core.hw->PLUGS2_MSZ);
+ writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &b2r2_core.hw->PLUGS2_PGZ);
+
+ writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64),
+ &b2r2_core.hw->PLUGS3_OP2);
+ writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &b2r2_core.hw->PLUGS3_CHZ);
+ writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (b2r2_core.min_req_time << 16),
+ &b2r2_core.hw->PLUGS3_MSZ);
+ writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &b2r2_core.hw->PLUGS3_PGZ);
+
+ writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64),
+ &b2r2_core.hw->PLUGT_OP2);
+ writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &b2r2_core.hw->PLUGT_CHZ);
+ writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (b2r2_core.min_req_time << 16),
+ &b2r2_core.hw->PLUGT_MSZ);
+ writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &b2r2_core.hw->PLUGT_PGZ);
+
+ /* B2R2 kicks off when LNA is written, LNA write must be last! */
+ switch (job->queue) {
+ case B2R2_CORE_QUEUE_CQ1:
+ writel(job->first_node_address, &b2r2_core.hw->BLT_CQ1_TRIG_IP);
+ writel(job->control, &b2r2_core.hw->BLT_CQ1_TRIG_CTL);
+ writel(job->pace_control, &b2r2_core.hw->BLT_CQ1_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_CQ2:
+ writel(job->first_node_address, &b2r2_core.hw->BLT_CQ2_TRIG_IP);
+ writel(job->control, &b2r2_core.hw->BLT_CQ2_TRIG_CTL);
+ writel(job->pace_control, &b2r2_core.hw->BLT_CQ2_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ1:
+ writel(job->control, &b2r2_core.hw->BLT_AQ1_CTL);
+ writel(job->first_node_address, &b2r2_core.hw->BLT_AQ1_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &b2r2_core.hw->BLT_AQ1_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ2:
+ writel(job->control, &b2r2_core.hw->BLT_AQ2_CTL);
+ writel(job->first_node_address, &b2r2_core.hw->BLT_AQ2_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &b2r2_core.hw->BLT_AQ2_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ3:
+ writel(job->control, &b2r2_core.hw->BLT_AQ3_CTL);
+ writel(job->first_node_address, &b2r2_core.hw->BLT_AQ3_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &b2r2_core.hw->BLT_AQ3_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ4:
+ writel(job->control, &b2r2_core.hw->BLT_AQ4_CTL);
+ writel(job->first_node_address, &b2r2_core.hw->BLT_AQ4_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &b2r2_core.hw->BLT_AQ4_LNA);
+ break;
+
+ /** Handle the default case */
+ default:
+ break;
+
+ } /* end switch */
+
+}
+
+/**
+ * handle_queue_event() - Handles interrupt event for specified B2R2 queue
+ *
+ * @queue: Queue to handle event for
+ *
+ * b2r2_core.lock must be held
+ */
+static void handle_queue_event(enum b2r2_core_queue queue)
+{
+ struct b2r2_core_job *job;
+
+ job = b2r2_core.active_jobs[queue];
+ if (job) {
+ if (job->job_state != B2R2_CORE_JOB_RUNNING)
+			/* The job should be running here;
+			   this is a severe error. TBD */
+ b2r2_log_warn(
+ "%s: Job is not running", __func__);
+
+ stop_hw_timer(job);
+
+ /* Remove from queue */
+ BUG_ON(b2r2_core.n_active_jobs == 0);
+ b2r2_core.active_jobs[queue] = NULL;
+ b2r2_core.n_active_jobs--;
+ }
+
+ if (!job) {
+ /* No job, error? */
+ b2r2_log_warn("%s: No job", __func__);
+ return;
+ }
+
+
+ /* Atomic context release resources, release resources will
+ be called again later from process context (work queue) */
+ if (job->release_resources)
+ job->release_resources(job, true);
+
+ /* Job is done */
+ job->job_state = B2R2_CORE_JOB_DONE;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Dispatch to work queue to handle callbacks */
+ queue_work(b2r2_core.work_queue, &job->work);
+}
+
+/**
+ * process_events() - Handles interrupt events
+ *
+ * @status: Contents of the B2R2 ITS register
+ */
+static void process_events(u32 status)
+{
+ u32 mask = 0xF;
+ u32 disable_itm_mask = 0;
+
+ b2r2_log_info("Enters process_events \n");
+ b2r2_log_info("status 0x%x \n", status);
+
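+	/*
+	 * Each queue owns a four-bit group in BLT_ITS: CQ1 bits 0-3,
+	 * CQ2 bits 4-7, AQ1 bits 12-15 and AQ2-AQ4 the following 4-bit
+	 * groups (see the B2R2BLT_ITS* defines above). The 0xF mask is
+	 * shifted to walk through the groups below.
+	 */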
+ /* Composition queue 1 */
+ if (status & mask) {
+ handle_queue_event(B2R2_CORE_QUEUE_CQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Composition queue 2 */
+ if (status & mask) {
+ handle_queue_event(B2R2_CORE_QUEUE_CQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 8;
+
+ /* Application queue 1 */
+ if (status & mask) {
+ handle_queue_event(B2R2_CORE_QUEUE_AQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 2 */
+ if (status & mask) {
+ handle_queue_event(B2R2_CORE_QUEUE_AQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 3 */
+ if (status & mask) {
+ handle_queue_event(B2R2_CORE_QUEUE_AQ3);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 4 */
+ if (status & mask) {
+ handle_queue_event(B2R2_CORE_QUEUE_AQ4);
+ disable_itm_mask |= mask;
+ }
+
+ /* Clear received interrupt flags */
+ writel(status, &b2r2_core.hw->BLT_ITS);
+ /* Disable handled interrupts */
+ writel(readl(&b2r2_core.hw->BLT_ITM0) & ~disable_itm_mask,
+ &b2r2_core.hw->BLT_ITM0);
+
+ b2r2_log_info("Returns process_events \n");
+}
+
+/**
+ * b2r2_irq_handler() - B2R2 interrupt handler
+ *
+ * @irq: Interrupt number (not used)
+ * @x: Device cookie passed to request_irq() (not used)
+ */
+static irqreturn_t b2r2_irq_handler(int irq, void *x)
+{
+ unsigned long flags;
+
+	/* The spin lock is needed in the irq handler (SMP) */
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+
+ /* Make sure that we have a clock */
+
+ /* Remember time for last irq (for timeout mgmt) */
+ b2r2_core.jiffies_last_irq = jiffies;
+ b2r2_core.stat_n_irq++;
+
+ /* Handle the interrupt(s) */
+ process_events(readl(&b2r2_core.hw->BLT_ITS));
+
+ /* Check if we can dispatch new jobs */
+ check_prio_list(true);
+
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * struct debugfs_reg - Represents one B2R2 register in debugfs
+ *
+ * @name: Register name
+ * @offset: Byte offset in B2R2 for register
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_regs - Array of B2R2 debugfs registers
+ */
+static const struct debugfs_reg debugfs_regs[] = {
+ {"BLT_SSBA17", offsetof(struct b2r2_memory_map, BLT_SSBA17)},
+ {"BLT_SSBA18", offsetof(struct b2r2_memory_map, BLT_SSBA18)},
+ {"BLT_SSBA19", offsetof(struct b2r2_memory_map, BLT_SSBA19)},
+ {"BLT_SSBA20", offsetof(struct b2r2_memory_map, BLT_SSBA20)},
+ {"BLT_SSBA21", offsetof(struct b2r2_memory_map, BLT_SSBA21)},
+ {"BLT_SSBA22", offsetof(struct b2r2_memory_map, BLT_SSBA22)},
+ {"BLT_SSBA23", offsetof(struct b2r2_memory_map, BLT_SSBA23)},
+ {"BLT_SSBA24", offsetof(struct b2r2_memory_map, BLT_SSBA24)},
+ {"BLT_STBA5", offsetof(struct b2r2_memory_map, BLT_STBA5)},
+ {"BLT_STBA6", offsetof(struct b2r2_memory_map, BLT_STBA6)},
+ {"BLT_STBA7", offsetof(struct b2r2_memory_map, BLT_STBA7)},
+ {"BLT_STBA8", offsetof(struct b2r2_memory_map, BLT_STBA8)},
+ {"BLT_CTL", offsetof(struct b2r2_memory_map, BLT_CTL)},
+ {"BLT_ITS", offsetof(struct b2r2_memory_map, BLT_ITS)},
+ {"BLT_STA1", offsetof(struct b2r2_memory_map, BLT_STA1)},
+ {"BLT_SSBA1", offsetof(struct b2r2_memory_map, BLT_SSBA1)},
+ {"BLT_SSBA2", offsetof(struct b2r2_memory_map, BLT_SSBA2)},
+ {"BLT_SSBA3", offsetof(struct b2r2_memory_map, BLT_SSBA3)},
+ {"BLT_SSBA4", offsetof(struct b2r2_memory_map, BLT_SSBA4)},
+ {"BLT_SSBA5", offsetof(struct b2r2_memory_map, BLT_SSBA5)},
+ {"BLT_SSBA6", offsetof(struct b2r2_memory_map, BLT_SSBA6)},
+ {"BLT_SSBA7", offsetof(struct b2r2_memory_map, BLT_SSBA7)},
+ {"BLT_SSBA8", offsetof(struct b2r2_memory_map, BLT_SSBA8)},
+ {"BLT_STBA1", offsetof(struct b2r2_memory_map, BLT_STBA1)},
+ {"BLT_STBA2", offsetof(struct b2r2_memory_map, BLT_STBA2)},
+ {"BLT_STBA3", offsetof(struct b2r2_memory_map, BLT_STBA3)},
+ {"BLT_STBA4", offsetof(struct b2r2_memory_map, BLT_STBA4)},
+ {"BLT_CQ1_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_TRIG_IP)},
+ {"BLT_CQ1_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_TRIG_CTL)},
+ {"BLT_CQ1_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_PACE_CTL)},
+ {"BLT_CQ1_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_IP)},
+ {"BLT_CQ2_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_TRIG_IP)},
+ {"BLT_CQ2_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_TRIG_CTL)},
+ {"BLT_CQ2_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_PACE_CTL)},
+ {"BLT_CQ2_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_IP)},
+ {"BLT_AQ1_CTL", offsetof(struct b2r2_memory_map, BLT_AQ1_CTL)},
+ {"BLT_AQ1_IP", offsetof(struct b2r2_memory_map, BLT_AQ1_IP)},
+ {"BLT_AQ1_LNA", offsetof(struct b2r2_memory_map, BLT_AQ1_LNA)},
+ {"BLT_AQ1_STA", offsetof(struct b2r2_memory_map, BLT_AQ1_STA)},
+ {"BLT_AQ2_CTL", offsetof(struct b2r2_memory_map, BLT_AQ2_CTL)},
+ {"BLT_AQ2_IP", offsetof(struct b2r2_memory_map, BLT_AQ2_IP)},
+ {"BLT_AQ2_LNA", offsetof(struct b2r2_memory_map, BLT_AQ2_LNA)},
+ {"BLT_AQ2_STA", offsetof(struct b2r2_memory_map, BLT_AQ2_STA)},
+ {"BLT_AQ3_CTL", offsetof(struct b2r2_memory_map, BLT_AQ3_CTL)},
+ {"BLT_AQ3_IP", offsetof(struct b2r2_memory_map, BLT_AQ3_IP)},
+ {"BLT_AQ3_LNA", offsetof(struct b2r2_memory_map, BLT_AQ3_LNA)},
+ {"BLT_AQ3_STA", offsetof(struct b2r2_memory_map, BLT_AQ3_STA)},
+ {"BLT_AQ4_CTL", offsetof(struct b2r2_memory_map, BLT_AQ4_CTL)},
+ {"BLT_AQ4_IP", offsetof(struct b2r2_memory_map, BLT_AQ4_IP)},
+ {"BLT_AQ4_LNA", offsetof(struct b2r2_memory_map, BLT_AQ4_LNA)},
+ {"BLT_AQ4_STA", offsetof(struct b2r2_memory_map, BLT_AQ4_STA)},
+ {"BLT_SSBA9", offsetof(struct b2r2_memory_map, BLT_SSBA9)},
+ {"BLT_SSBA10", offsetof(struct b2r2_memory_map, BLT_SSBA10)},
+ {"BLT_SSBA11", offsetof(struct b2r2_memory_map, BLT_SSBA11)},
+ {"BLT_SSBA12", offsetof(struct b2r2_memory_map, BLT_SSBA12)},
+ {"BLT_SSBA13", offsetof(struct b2r2_memory_map, BLT_SSBA13)},
+ {"BLT_SSBA14", offsetof(struct b2r2_memory_map, BLT_SSBA14)},
+ {"BLT_SSBA15", offsetof(struct b2r2_memory_map, BLT_SSBA15)},
+ {"BLT_SSBA16", offsetof(struct b2r2_memory_map, BLT_SSBA16)},
+ {"BLT_SGA1", offsetof(struct b2r2_memory_map, BLT_SGA1)},
+ {"BLT_SGA2", offsetof(struct b2r2_memory_map, BLT_SGA2)},
+ {"BLT_ITM0", offsetof(struct b2r2_memory_map, BLT_ITM0)},
+ {"BLT_ITM1", offsetof(struct b2r2_memory_map, BLT_ITM1)},
+ {"BLT_ITM2", offsetof(struct b2r2_memory_map, BLT_ITM2)},
+ {"BLT_ITM3", offsetof(struct b2r2_memory_map, BLT_ITM3)},
+ {"BLT_DFV2", offsetof(struct b2r2_memory_map, BLT_DFV2)},
+ {"BLT_DFV1", offsetof(struct b2r2_memory_map, BLT_DFV1)},
+ {"BLT_PRI", offsetof(struct b2r2_memory_map, BLT_PRI)},
+ {"PLUGS1_OP2", offsetof(struct b2r2_memory_map, PLUGS1_OP2)},
+ {"PLUGS1_CHZ", offsetof(struct b2r2_memory_map, PLUGS1_CHZ)},
+ {"PLUGS1_MSZ", offsetof(struct b2r2_memory_map, PLUGS1_MSZ)},
+ {"PLUGS1_PGZ", offsetof(struct b2r2_memory_map, PLUGS1_PGZ)},
+ {"PLUGS2_OP2", offsetof(struct b2r2_memory_map, PLUGS2_OP2)},
+ {"PLUGS2_CHZ", offsetof(struct b2r2_memory_map, PLUGS2_CHZ)},
+ {"PLUGS2_MSZ", offsetof(struct b2r2_memory_map, PLUGS2_MSZ)},
+ {"PLUGS2_PGZ", offsetof(struct b2r2_memory_map, PLUGS2_PGZ)},
+ {"PLUGS3_OP2", offsetof(struct b2r2_memory_map, PLUGS3_OP2)},
+ {"PLUGS3_CHZ", offsetof(struct b2r2_memory_map, PLUGS3_CHZ)},
+ {"PLUGS3_MSZ", offsetof(struct b2r2_memory_map, PLUGS3_MSZ)},
+ {"PLUGS3_PGZ", offsetof(struct b2r2_memory_map, PLUGS3_PGZ)},
+ {"PLUGT_OP2", offsetof(struct b2r2_memory_map, PLUGT_OP2)},
+ {"PLUGT_CHZ", offsetof(struct b2r2_memory_map, PLUGT_CHZ)},
+ {"PLUGT_MSZ", offsetof(struct b2r2_memory_map, PLUGT_MSZ)},
+ {"PLUGT_PGZ", offsetof(struct b2r2_memory_map, PLUGT_PGZ)},
+ {"BLT_NIP", offsetof(struct b2r2_memory_map, BLT_NIP)},
+ {"BLT_CIC", offsetof(struct b2r2_memory_map, BLT_CIC)},
+ {"BLT_INS", offsetof(struct b2r2_memory_map, BLT_INS)},
+ {"BLT_ACK", offsetof(struct b2r2_memory_map, BLT_ACK)},
+ {"BLT_TBA", offsetof(struct b2r2_memory_map, BLT_TBA)},
+ {"BLT_TTY", offsetof(struct b2r2_memory_map, BLT_TTY)},
+ {"BLT_TXY", offsetof(struct b2r2_memory_map, BLT_TXY)},
+ {"BLT_TSZ", offsetof(struct b2r2_memory_map, BLT_TSZ)},
+ {"BLT_S1CF", offsetof(struct b2r2_memory_map, BLT_S1CF)},
+ {"BLT_S2CF", offsetof(struct b2r2_memory_map, BLT_S2CF)},
+ {"BLT_S1BA", offsetof(struct b2r2_memory_map, BLT_S1BA)},
+ {"BLT_S1TY", offsetof(struct b2r2_memory_map, BLT_S1TY)},
+ {"BLT_S1XY", offsetof(struct b2r2_memory_map, BLT_S1XY)},
+ {"BLT_S2BA", offsetof(struct b2r2_memory_map, BLT_S2BA)},
+ {"BLT_S2TY", offsetof(struct b2r2_memory_map, BLT_S2TY)},
+ {"BLT_S2XY", offsetof(struct b2r2_memory_map, BLT_S2XY)},
+ {"BLT_S2SZ", offsetof(struct b2r2_memory_map, BLT_S2SZ)},
+ {"BLT_S3BA", offsetof(struct b2r2_memory_map, BLT_S3BA)},
+ {"BLT_S3TY", offsetof(struct b2r2_memory_map, BLT_S3TY)},
+ {"BLT_S3XY", offsetof(struct b2r2_memory_map, BLT_S3XY)},
+ {"BLT_S3SZ", offsetof(struct b2r2_memory_map, BLT_S3SZ)},
+ {"BLT_CWO", offsetof(struct b2r2_memory_map, BLT_CWO)},
+ {"BLT_CWS", offsetof(struct b2r2_memory_map, BLT_CWS)},
+ {"BLT_CCO", offsetof(struct b2r2_memory_map, BLT_CCO)},
+ {"BLT_CML", offsetof(struct b2r2_memory_map, BLT_CML)},
+ {"BLT_FCTL", offsetof(struct b2r2_memory_map, BLT_FCTL)},
+ {"BLT_PMK", offsetof(struct b2r2_memory_map, BLT_PMK)},
+ {"BLT_RSF", offsetof(struct b2r2_memory_map, BLT_RSF)},
+ {"BLT_RZI", offsetof(struct b2r2_memory_map, BLT_RZI)},
+ {"BLT_HFP", offsetof(struct b2r2_memory_map, BLT_HFP)},
+ {"BLT_VFP", offsetof(struct b2r2_memory_map, BLT_VFP)},
+ {"BLT_Y_RSF", offsetof(struct b2r2_memory_map, BLT_Y_RSF)},
+ {"BLT_Y_RZI", offsetof(struct b2r2_memory_map, BLT_Y_RZI)},
+ {"BLT_Y_HFP", offsetof(struct b2r2_memory_map, BLT_Y_HFP)},
+ {"BLT_Y_VFP", offsetof(struct b2r2_memory_map, BLT_Y_VFP)},
+ {"BLT_KEY1", offsetof(struct b2r2_memory_map, BLT_KEY1)},
+ {"BLT_KEY2", offsetof(struct b2r2_memory_map, BLT_KEY2)},
+ {"BLT_SAR", offsetof(struct b2r2_memory_map, BLT_SAR)},
+ {"BLT_USR", offsetof(struct b2r2_memory_map, BLT_USR)},
+ {"BLT_IVMX0", offsetof(struct b2r2_memory_map, BLT_IVMX0)},
+ {"BLT_IVMX1", offsetof(struct b2r2_memory_map, BLT_IVMX1)},
+ {"BLT_IVMX2", offsetof(struct b2r2_memory_map, BLT_IVMX2)},
+ {"BLT_IVMX3", offsetof(struct b2r2_memory_map, BLT_IVMX3)},
+ {"BLT_OVMX0", offsetof(struct b2r2_memory_map, BLT_OVMX0)},
+ {"BLT_OVMX1", offsetof(struct b2r2_memory_map, BLT_OVMX1)},
+ {"BLT_OVMX2", offsetof(struct b2r2_memory_map, BLT_OVMX2)},
+ {"BLT_OVMX3", offsetof(struct b2r2_memory_map, BLT_OVMX3)},
+ {"BLT_VC1R", offsetof(struct b2r2_memory_map, BLT_VC1R)},
+ {"BLT_Y_HFC0", offsetof(struct b2r2_memory_map, BLT_Y_HFC0)},
+ {"BLT_Y_HFC1", offsetof(struct b2r2_memory_map, BLT_Y_HFC1)},
+ {"BLT_Y_HFC2", offsetof(struct b2r2_memory_map, BLT_Y_HFC2)},
+ {"BLT_Y_HFC3", offsetof(struct b2r2_memory_map, BLT_Y_HFC3)},
+ {"BLT_Y_HFC4", offsetof(struct b2r2_memory_map, BLT_Y_HFC4)},
+ {"BLT_Y_HFC5", offsetof(struct b2r2_memory_map, BLT_Y_HFC5)},
+ {"BLT_Y_HFC6", offsetof(struct b2r2_memory_map, BLT_Y_HFC6)},
+ {"BLT_Y_HFC7", offsetof(struct b2r2_memory_map, BLT_Y_HFC7)},
+ {"BLT_Y_HFC8", offsetof(struct b2r2_memory_map, BLT_Y_HFC8)},
+ {"BLT_Y_HFC9", offsetof(struct b2r2_memory_map, BLT_Y_HFC9)},
+ {"BLT_Y_HFC10", offsetof(struct b2r2_memory_map, BLT_Y_HFC10)},
+ {"BLT_Y_HFC11", offsetof(struct b2r2_memory_map, BLT_Y_HFC11)},
+ {"BLT_Y_HFC12", offsetof(struct b2r2_memory_map, BLT_Y_HFC12)},
+ {"BLT_Y_HFC13", offsetof(struct b2r2_memory_map, BLT_Y_HFC13)},
+ {"BLT_Y_HFC14", offsetof(struct b2r2_memory_map, BLT_Y_HFC14)},
+ {"BLT_Y_HFC15", offsetof(struct b2r2_memory_map, BLT_Y_HFC15)},
+ {"BLT_Y_VFC0", offsetof(struct b2r2_memory_map, BLT_Y_VFC0)},
+ {"BLT_Y_VFC1", offsetof(struct b2r2_memory_map, BLT_Y_VFC1)},
+ {"BLT_Y_VFC2", offsetof(struct b2r2_memory_map, BLT_Y_VFC2)},
+ {"BLT_Y_VFC3", offsetof(struct b2r2_memory_map, BLT_Y_VFC3)},
+ {"BLT_Y_VFC4", offsetof(struct b2r2_memory_map, BLT_Y_VFC4)},
+ {"BLT_Y_VFC5", offsetof(struct b2r2_memory_map, BLT_Y_VFC5)},
+ {"BLT_Y_VFC6", offsetof(struct b2r2_memory_map, BLT_Y_VFC6)},
+ {"BLT_Y_VFC7", offsetof(struct b2r2_memory_map, BLT_Y_VFC7)},
+ {"BLT_Y_VFC8", offsetof(struct b2r2_memory_map, BLT_Y_VFC8)},
+ {"BLT_Y_VFC9", offsetof(struct b2r2_memory_map, BLT_Y_VFC9)},
+ {"BLT_HFC0", offsetof(struct b2r2_memory_map, BLT_HFC0)},
+ {"BLT_HFC1", offsetof(struct b2r2_memory_map, BLT_HFC1)},
+ {"BLT_HFC2", offsetof(struct b2r2_memory_map, BLT_HFC2)},
+ {"BLT_HFC3", offsetof(struct b2r2_memory_map, BLT_HFC3)},
+ {"BLT_HFC4", offsetof(struct b2r2_memory_map, BLT_HFC4)},
+ {"BLT_HFC5", offsetof(struct b2r2_memory_map, BLT_HFC5)},
+ {"BLT_HFC6", offsetof(struct b2r2_memory_map, BLT_HFC6)},
+ {"BLT_HFC7", offsetof(struct b2r2_memory_map, BLT_HFC7)},
+ {"BLT_HFC8", offsetof(struct b2r2_memory_map, BLT_HFC8)},
+ {"BLT_HFC9", offsetof(struct b2r2_memory_map, BLT_HFC9)},
+ {"BLT_HFC10", offsetof(struct b2r2_memory_map, BLT_HFC10)},
+ {"BLT_HFC11", offsetof(struct b2r2_memory_map, BLT_HFC11)},
+ {"BLT_HFC12", offsetof(struct b2r2_memory_map, BLT_HFC12)},
+ {"BLT_HFC13", offsetof(struct b2r2_memory_map, BLT_HFC13)},
+ {"BLT_HFC14", offsetof(struct b2r2_memory_map, BLT_HFC14)},
+ {"BLT_HFC15", offsetof(struct b2r2_memory_map, BLT_HFC15)},
+ {"BLT_VFC0", offsetof(struct b2r2_memory_map, BLT_VFC0)},
+ {"BLT_VFC1", offsetof(struct b2r2_memory_map, BLT_VFC1)},
+ {"BLT_VFC2", offsetof(struct b2r2_memory_map, BLT_VFC2)},
+ {"BLT_VFC3", offsetof(struct b2r2_memory_map, BLT_VFC3)},
+ {"BLT_VFC4", offsetof(struct b2r2_memory_map, BLT_VFC4)},
+ {"BLT_VFC5", offsetof(struct b2r2_memory_map, BLT_VFC5)},
+ {"BLT_VFC6", offsetof(struct b2r2_memory_map, BLT_VFC6)},
+ {"BLT_VFC7", offsetof(struct b2r2_memory_map, BLT_VFC7)},
+ {"BLT_VFC8", offsetof(struct b2r2_memory_map, BLT_VFC8)},
+ {"BLT_VFC9", offsetof(struct b2r2_memory_map, BLT_VFC9)},
+};
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * printk_regs() - Print B2R2 registers to printk
+ */
+static void printk_regs(void)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value = readl(
+ (unsigned long *) (((u8 *) b2r2_core.hw) +
+ debugfs_regs[i].offset));
+ b2r2_log_regdump("%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+#endif
+}
+#endif
+
+/**
+ * debugfs_b2r2_reg_read() - Implements debugfs read for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_reg_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size;
+ int ret = 0;
+
+ unsigned long value;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Read from B2R2 */
+ value = readl((unsigned long *) (((u8 *) b2r2_core.hw) +
+ (u32) filp->f_dentry->
+ d_inode->i_private));
+
+ /* Build the string */
+ dev_size = sprintf(Buf, "%8lX\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ /* Return it to user space */
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_write() - Implements debugfs write for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_reg_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char Buf[80];
+ u32 reg_value;
+ int ret = 0;
+
+ /* Adjust count */
+ if (count >= sizeof(Buf))
+ count = sizeof(Buf) - 1;
+ /* Get it from user space */
+ if (copy_from_user(Buf, buf, count))
+ return -EINVAL;
+ Buf[count] = 0;
+ /* Convert from hex string */
+ if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
+ return -EINVAL;
+
+ writel(reg_value, (u32 *) (((u8 *) b2r2_core.hw) +
+ (u32) filp->f_dentry->d_inode->i_private));
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_fops() - File operations for B2R2 register debugfs
+ */
+static const struct file_operations debugfs_b2r2_reg_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_reg_read,
+ .write = debugfs_b2r2_reg_write,
+};
+
+/**
+ * debugfs_b2r2_regs_read() - Implements debugfs read for B2R2 register dump
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_regs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a giant string containing all registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value =
+ readl((unsigned long *) (((u8 *) b2r2_core.hw) +
+ debugfs_regs[i].offset));
+ dev_size += sprintf(Buf + dev_size, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_regs_fops() - File operations for B2R2 register dump debugfs
+ */
+static const struct file_operations debugfs_b2r2_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_regs_read,
+};
+
+/**
+ * debugfs_b2r2_stat_read() - Implements debugfs read for B2R2 statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a string containing all statistics */
+ dev_size += sprintf(Buf + dev_size, "Interrupts: %lu\n",
+ b2r2_core.stat_n_irq);
+ dev_size += sprintf(Buf + dev_size, "Added jobs: %lu\n",
+ b2r2_core.stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Removed jobs: %lu\n",
+ b2r2_core.stat_n_jobs_removed);
+ dev_size += sprintf(Buf + dev_size, "Jobs in prio list: %lu\n",
+ b2r2_core.stat_n_jobs_in_prio_list);
+ dev_size += sprintf(Buf + dev_size, "Active jobs: %lu\n",
+ b2r2_core.n_active_jobs);
+ for (i = 0; i < ARRAY_SIZE(b2r2_core.active_jobs); i++)
+ dev_size += sprintf(Buf + dev_size, "Job in queue %d: %p\n",
+ i, b2r2_core.active_jobs[i]);
+ dev_size += sprintf(Buf + dev_size, "Clock requests: %lu\n",
+ b2r2_core.clock_request_count);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_stat_fops() - File operations for B2R2 statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_stat_read,
+};
+
+
+/**
+ * debugfs_b2r2_clock_read() - Implements debugfs read for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_clock_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /* 10 characters hex number + newline + string terminator; */
+ char Buf[10+2];
+ size_t dev_size;
+ int ret = 0;
+
+ unsigned long value = clk_get_rate(b2r2_core.b2r2_clock);
+
+ dev_size = sprintf(Buf, "%#010lX\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_write() - Implements debugfs write for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_clock_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char Buf[80];
+ u32 reg_value;
+ int ret = 0;
+
+ if (count >= sizeof(Buf))
+ count = sizeof(Buf) - 1;
+ if (copy_from_user(Buf, buf, count))
+ return -EINVAL;
+ Buf[count] = 0;
+ if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
+ return -EINVAL;
+
+ /*not working yet*/
+ /*clk_set_rate(b2r2_core.b2r2_clock, (unsigned long) reg_value);*/
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_fops() - File operations for PMU B2R2 clock debugfs
+ */
+static const struct file_operations debugfs_b2r2_clock_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_clock_read,
+ .write = debugfs_b2r2_clock_write,
+};
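+
+/*
+ * Illustrative note (not part of the original change): the debugfs entries
+ * served by the file operations above are created under the "b2r2" root
+ * directory in b2r2_probe() and init_hw(). Assuming debugfs is mounted at
+ * the conventional /sys/kernel/debug, they can be exercised from user space
+ * roughly as follows (paths and values are examples only):
+ *
+ *   cat /sys/kernel/debug/b2r2/stat
+ *   cat /sys/kernel/debug/b2r2/clock
+ *   cat /sys/kernel/debug/b2r2/regs/all
+ *   cat /sys/kernel/debug/b2r2/regs/BLT_KEY1
+ *   echo 00000000 > /sys/kernel/debug/b2r2/regs/BLT_USR
+ */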
+
+#endif
+
+/**
+ * init_hw() - B2R2 hardware reset and initialization
+ *
+ * 1) Clears pending interrupts and puts B2R2 into soft reset by writing
+ *    B2R2BLT_CTLGLOBAL_soft_reset to the B2R2 control register.
+ *
+ * 2) Registers the interrupt handler.
+ *
+ * 3) Releases the reset and polls the B2R2 status register for the
+ *    B2R2BLT_STA1BDISP_IDLE flag, using a timeout rather than an
+ *    unbounded while loop.
+ *
+ * 4) Registers debugfs entries for register access.
+ *
+ * 5) Recovers from any error without leaks.
+ */
+static int init_hw(void)
+{
+ int result = 0;
+ u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
+
+ /* Put B2R2 into reset */
+ clear_interrupts();
+
+ writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &b2r2_core.hw->BLT_CTL);
+
+
+ /* Set up interrupt handler */
+ result = request_irq(b2r2_core.irq, b2r2_irq_handler, 0,
+ "b2r2-interrupt", 0);
+ if (result) {
+ b2r2_log_err("%s: failed to register IRQ for B2R2\n", __func__);
+ goto b2r2_init_request_irq_failed;
+ }
+
+ b2r2_log_info("do a global reset..\n");
+
+ /* Release reset */
+ writel(0x00000000, &b2r2_core.hw->BLT_CTL);
+
+ b2r2_log_info("wait for B2R2 to be idle..\n");
+
+ /** Wait for B2R2 to be idle (on a timeout rather than while loop) */
+ while ((uTimeOut > 0) &&
+ ((readl(&b2r2_core.hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+ if (uTimeOut == 0) {
+ b2r2_log_err(
+ "%s: B2R2 not idle after SW reset\n", __func__);
+ result = -EAGAIN;
+ goto b2r2_core_init_hw_timeout;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs files for register access */
+ if (b2r2_core.debugfs_root_dir && !b2r2_core.debugfs_regs_dir) {
+ int i;
+ b2r2_core.debugfs_regs_dir = debugfs_create_dir("regs",
+ b2r2_core.debugfs_root_dir);
+ debugfs_create_file("all", 0666, b2r2_core.debugfs_regs_dir,
+ 0, &debugfs_b2r2_regs_fops);
+ /* Create debugfs entries for all static registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++)
+ debugfs_create_file(debugfs_regs[i].name, 0666,
+ b2r2_core.debugfs_regs_dir,
+ (void *) debugfs_regs[i].offset,
+ &debugfs_b2r2_reg_fops);
+ }
+#endif
+
+ b2r2_log_info("%s ended..\n", __func__);
+ return result;
+
+/** Recover from any error without any leaks */
+
+b2r2_core_init_hw_timeout:
+
+ /** Free B2R2 interrupt handler */
+
+ free_irq(b2r2_core.irq, 0);
+
+b2r2_init_request_irq_failed:
+
+ if (b2r2_core.hw)
+ iounmap(b2r2_core.hw);
+ b2r2_core.hw = NULL;
+
+ return result;
+
+}
+
+
+/**
+ * exit_hw() - B2R2 Hardware exit
+ *
+ * b2r2_core.lock _must_ NOT be held
+ */
+static void exit_hw(void)
+{
+ unsigned long flags;
+
+ b2r2_log_info("%s started..\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Unregister our debugfs entries */
+ if (b2r2_core.debugfs_regs_dir) {
+ debugfs_remove_recursive(b2r2_core.debugfs_regs_dir);
+ b2r2_core.debugfs_regs_dir = NULL;
+ }
+#endif
+ b2r2_log_debug("%s: locking b2r2_core.lock\n", __func__);
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+
+ /* Cancel all pending jobs */
+ b2r2_log_debug("%s: canceling pending jobs\n", __func__);
+ exit_job_list(&b2r2_core.prio_queue);
+
+ /* Soft reset B2R2 (Close all DMA,
+ reset all state to idle, reset regs)*/
+ b2r2_log_debug("%s: putting b2r2 in reset\n", __func__);
+ writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &b2r2_core.hw->BLT_CTL);
+
+ b2r2_log_debug("%s: clearing interrupts\n", __func__);
+ clear_interrupts();
+
+ /** Free B2R2 interrupt handler */
+ b2r2_log_debug("%s: freeing interrupt handler\n", __func__);
+ free_irq(b2r2_core.irq, 0);
+
+ b2r2_log_debug("%s: unlocking b2r2_core.lock\n", __func__);
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ b2r2_log_info("%s ended...\n", __func__);
+}
+
+/**
+ * b2r2_probe() - This routine loads the B2R2 core driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+
+ BUG_ON(pdev == NULL);
+
+ /* Set the log device before b2r2_debug_init() so error prints have a dev */
+ b2r2_core.log_dev = &pdev->dev;
+
+ ret = b2r2_debug_init(&pdev->dev);
+ if (ret < 0) {
+ dev_err(b2r2_core.log_dev, "b2r2_debug_init failed\n");
+ goto b2r2_probe_debug_init_failed;
+ }
+
+ b2r2_log_info("init started.\n");
+
+ /* Init spin locks */
+ spin_lock_init(&b2r2_core.lock);
+
+ /* Init job queues */
+ INIT_LIST_HEAD(&b2r2_core.prio_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Create work queue for callbacks & timeout */
+ INIT_DELAYED_WORK(&b2r2_core.timeout_work, timeout_work_function);
+#endif
+
+ /* Work queue for callbacks and timeout management */
+ b2r2_core.work_queue = create_workqueue("B2R2");
+ if (!b2r2_core.work_queue) {
+ ret = -ENOMEM;
+ goto b2r2_probe_no_work_queue;
+ }
+
+ /* Get the clock for B2R2 */
+ b2r2_core.b2r2_clock = clk_get(&pdev->dev, "b2r2");
+ if (IS_ERR(b2r2_core.b2r2_clock)) {
+ ret = PTR_ERR(b2r2_core.b2r2_clock);
+ b2r2_log_err("clk_get b2r2 failed\n");
+ goto b2r2_probe_no_clk;
+ }
+
+ /* Get the B2R2 regulator */
+ b2r2_core.b2r2_reg = regulator_get(&pdev->dev, "vsupply");
+ if (IS_ERR(b2r2_core.b2r2_reg)) {
+ ret = PTR_ERR(b2r2_core.b2r2_reg);
+ b2r2_log_err("regulator_get vsupply failed (dev_name=%s)\n",
+ dev_name(&pdev->dev));
+ goto b2r2_probe_no_reg;
+ }
+
+ /* Init power management */
+ mutex_init(&b2r2_core.domain_lock);
+ INIT_DELAYED_WORK_DEFERRABLE(&b2r2_core.domain_disable_work,
+ domain_disable_work_function);
+ b2r2_core.domain_enabled = false;
+
+ /* Map B2R2 into kernel virtual memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ ret = -ENODEV;
+ goto b2r2_probe_no_res;
+ }
+
+ /* Hook up irq */
+ b2r2_core.irq = platform_get_irq(pdev, 0);
+ if (b2r2_core.irq <= 0) {
+ b2r2_log_err("%s: Failed to get irq (irq=%d)\n", __func__,
+ b2r2_core.irq);
+ ret = -ENODEV;
+ goto b2r2_failed_irq_get;
+ }
+
+ b2r2_core.hw = (struct b2r2_memory_map *) ioremap(res->start,
+ res->end - res->start + 1);
+ if (b2r2_core.hw == NULL) {
+
+ b2r2_log_info("%s: ioremap failed\n", __func__);
+ ret = -ENOMEM;
+ goto b2r2_probe_ioremap_failed;
+ }
+
+ dev_dbg(b2r2_core.log_dev,
+ "b2r2 structure address %p\n",
+ b2r2_core.hw);
+
+ /* Initialize b2r2_blt module. FIXME: Module of its own
+ or perhaps a dedicated module init c file? */
+ ret = b2r2_blt_module_init();
+ if (ret < 0) {
+ b2r2_log_err("b2r2_blt_module_init failed\n");
+ goto b2r2_probe_blt_init_fail;
+ }
+
+ b2r2_core.op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT;
+ b2r2_core.ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT;
+ b2r2_core.pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT;
+ b2r2_core.mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT;
+ b2r2_core.min_req_time = 0;
+
+#ifdef CONFIG_DEBUG_FS
+ b2r2_core.debugfs_root_dir = debugfs_create_dir("b2r2", NULL);
+ debugfs_create_file("stat", 0666, b2r2_core.debugfs_root_dir,
+ 0, &debugfs_b2r2_stat_fops);
+ debugfs_create_file("clock", 0666, b2r2_core.debugfs_root_dir,
+ 0, &debugfs_b2r2_clock_fops);
+
+ debugfs_create_u8("op_size", 0666, b2r2_core.debugfs_root_dir,
+ &b2r2_core.op_size);
+ debugfs_create_u8("ch_size", 0666, b2r2_core.debugfs_root_dir,
+ &b2r2_core.ch_size);
+ debugfs_create_u8("pg_size", 0666, b2r2_core.debugfs_root_dir,
+ &b2r2_core.pg_size);
+ debugfs_create_u8("mg_size", 0666, b2r2_core.debugfs_root_dir,
+ &b2r2_core.mg_size);
+ debugfs_create_u16("min_req_time", 0666, b2r2_core.debugfs_root_dir,
+ &b2r2_core.min_req_time);
+#endif
+
+ b2r2_log_info("init done.\n");
+
+ return ret;
+
+/** Recover from any error if something fails */
+b2r2_probe_blt_init_fail:
+b2r2_probe_ioremap_failed:
+b2r2_failed_irq_get:
+b2r2_probe_no_res:
+ regulator_put(b2r2_core.b2r2_reg);
+b2r2_probe_no_reg:
+ clk_put(b2r2_core.b2r2_clock);
+b2r2_probe_no_clk:
+ destroy_workqueue(b2r2_core.work_queue);
+ b2r2_core.work_queue = NULL;
+b2r2_probe_no_work_queue:
+
+ b2r2_log_info("init done with errors.\n");
+b2r2_probe_debug_init_failed:
+
+ return ret;
+
+}
+
+
+
+/**
+ * b2r2_remove - This routine unloads b2r2 driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_remove(struct platform_device *pdev)
+{
+ unsigned long flags;
+
+ BUG_ON(pdev == NULL);
+
+ b2r2_log_info("%s started\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(b2r2_core.debugfs_root_dir);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(b2r2_core.work_queue);
+
+ /* Exit b2r2 blt module */
+ b2r2_blt_module_exit();
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&b2r2_core.timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(b2r2_core.work_queue);
+
+ /* Make sure the power is turned off */
+ cancel_delayed_work_sync(&b2r2_core.domain_disable_work);
+
+ /** Unmap B2R2 registers */
+ b2r2_log_info("unmap b2r2 registers..\n");
+ if (b2r2_core.hw) {
+ iounmap(b2r2_core.hw);
+
+ b2r2_core.hw = NULL;
+ }
+
+ destroy_workqueue(b2r2_core.work_queue);
+
+ spin_lock_irqsave(&b2r2_core.lock, flags);
+ b2r2_core.work_queue = NULL;
+ spin_unlock_irqrestore(&b2r2_core.lock, flags);
+
+ /* Return the clock */
+ clk_put(b2r2_core.b2r2_clock);
+ regulator_put(b2r2_core.b2r2_reg);
+
+ b2r2_log_info("%s ended\n", __func__);
+
+ b2r2_core.log_dev = NULL;
+
+ b2r2_debug_exit();
+
+ return 0;
+
+}
+/**
+ * b2r2_suspend() - This routine puts the B2R2 device into suspend state.
+ * @pdev: platform device.
+ * @state: power state to enter.
+ *
+ * This routine flushes all pending work and makes sure the power is turned
+ * off before the B2R2 device enters the suspend state.
+ */
+int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ b2r2_log_info("%s\n", __func__);
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(b2r2_core.work_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&b2r2_core.timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(b2r2_core.work_queue);
+
+ /* Make sure power is turned off */
+ cancel_delayed_work_sync(&b2r2_core.domain_disable_work);
+
+ return 0;
+}
+
+
+/**
+ * b2r2_resume() - This routine resumes the B2R2 device from suspend state.
+ * @pdev: platform device.
+ *
+ * This routine restores the state of the B2R2 device when it resumes.
+ */
+int b2r2_resume(struct platform_device *pdev)
+{
+ b2r2_log_info("%s\n", __func__);
+
+ return 0;
+}
+
+/**
+ * struct platform_b2r2_driver - Platform driver configuration for the
+ * B2R2 core driver
+ */
+static struct platform_driver platform_b2r2_driver = {
+ .remove = b2r2_remove,
+ .driver = {
+ .name = "b2r2",
+ },
+ /** TODO implement power mgmt functions */
+ .suspend = b2r2_suspend,
+ .resume = b2r2_resume,
+};
+
+
+/**
+ * b2r2_init() - Module init function for the B2R2 core module
+ */
+static int __init b2r2_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return platform_driver_probe(&platform_b2r2_driver, b2r2_probe);
+}
+module_init(b2r2_init);
+
+/**
+ * b2r2_exit() - Module exit function for the B2R2 core module
+ */
+static void __exit b2r2_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&platform_b2r2_driver);
+ return;
+}
+module_exit(b2r2_exit);
+
+
+/** Module license: GPL */
+
+MODULE_LICENSE("GPL");
+
+/** Module author & description */
+
+MODULE_AUTHOR("Robert Fekete (robert.fekete@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Core driver");
diff --git a/drivers/video/b2r2/b2r2_core.h b/drivers/video/b2r2/b2r2_core.h
new file mode 100644
index 00000000000..2f958751694
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_CORE_H__
+#define __B2R2_CORE_H__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/**
+ * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to
+ *
+ * @B2R2_CORE_QUEUE_AQ1: Application queue 1
+ * @B2R2_CORE_QUEUE_AQ2: Application queue 2
+ * @B2R2_CORE_QUEUE_AQ3: Application queue 3
+ * @B2R2_CORE_QUEUE_AQ4: Application queue 4
+ * @B2R2_CORE_QUEUE_CQ1: Composition queue 1
+ * @B2R2_CORE_QUEUE_CQ2: Composition queue 2
+ * @B2R2_CORE_QUEUE_NO_OF: Number of queues
+ */
+enum b2r2_core_queue {
+ B2R2_CORE_QUEUE_AQ1 = 0,
+ B2R2_CORE_QUEUE_AQ2,
+ B2R2_CORE_QUEUE_AQ3,
+ B2R2_CORE_QUEUE_AQ4,
+ B2R2_CORE_QUEUE_CQ1,
+ B2R2_CORE_QUEUE_CQ2,
+ B2R2_CORE_QUEUE_NO_OF,
+};
+
+#define B2R2_NUM_APPLICATIONS_QUEUES 4
+
+/**
+ * enum b2r2_core_job_state - Indicates the current state of the job
+ *
+ * @B2R2_CORE_JOB_IDLE: Never queued
+ * @B2R2_CORE_JOB_QUEUED: In queue but not started yet
+ * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2
+ * @B2R2_CORE_JOB_DONE: Completed
+ * @B2R2_CORE_JOB_CANCELED: Canceled
+ */
+enum b2r2_core_job_state {
+ B2R2_CORE_JOB_IDLE = 0,
+ B2R2_CORE_JOB_QUEUED,
+ B2R2_CORE_JOB_RUNNING,
+ B2R2_CORE_JOB_DONE,
+ B2R2_CORE_JOB_CANCELED,
+};
+
+/**
+ * struct b2r2_core_job - Represents a B2R2 core job
+ *
+ * @start_sentinel: Memory overwrite guard
+ *
+ * @tag: Client value. Used by b2r2_core_job_find_first_with_tag().
+ * @prio: Job priority, from -19 up to 20. Mapped to the
+ * B2R2 application queues. Filled in by the client.
+ * @first_node_address: Physical address of the first node. Filled
+ * in by the client.
+ * @last_node_address: Physical address of the last node. Filled
+ * in by the client.
+ *
+ * @callback: Function that will be called when the job is done.
+ * @acquire_resources: Function that allocates the resources needed
+ * to execute the job (i.e. SRAM alloc). Must not
+ * sleep if atomic, should fail with negative error code
+ * if resources not available.
+ * @release_resources: Function that releases the resources previously
+ * allocated by acquire_resources (i.e. SRAM alloc).
+ * @release: Function that will be called when the reference count reaches
+ * zero.
+ *
+ * @job_id: Unique id for this job, assigned by B2R2 core
+ * @job_state: The current state of the job
+ * @jiffies: Number of jiffies needed for this request
+ *
+ * @list: List entry element for internal list management
+ * @event: Wait queue event to wait for job done
+ * @work: Work queue structure, for callback implementation
+ *
+ * @queue: The queue that this job shall be submitted to
+ * @control: B2R2 Queue control
+ * @pace_control: For composition queue only
+ * @interrupt_context: Context for interrupt
+ *
+ * @end_sentinel: Memory overwrite guard
+ */
+struct b2r2_core_job {
+ u32 start_sentinel;
+
+ /* Data to be filled in by client */
+ int tag;
+ int prio;
+ u32 first_node_address;
+ u32 last_node_address;
+ void (*callback)(struct b2r2_core_job *);
+ int (*acquire_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release)(struct b2r2_core_job *);
+
+ /* Output data, do not modify */
+ int job_id;
+ enum b2r2_core_job_state job_state;
+ unsigned long jiffies;
+
+ /* Data below is internal to b2r2_core, do not modify */
+
+ /* Reference counting */
+ u32 ref_count;
+
+ /* Internal data */
+ struct list_head list;
+ wait_queue_head_t event;
+ struct work_struct work;
+
+ /* B2R2 HW data */
+ enum b2r2_core_queue queue;
+ u32 control;
+ u32 pace_control;
+ u32 interrupt_context;
+
+ /* Timing data */
+ u32 hw_start_time;
+ s32 nsec_active_in_hw;
+
+ u32 end_sentinel;
+};
+
+/**
+ * b2r2_core_job_add() - Adds a job to B2R2 job queues
+ *
+ * The job reference count will be increased after this function
+ * has been called and b2r2_core_job_release() must be called to
+ * release the reference. The job callback function will always be
+ * called after the job is done or cancelled.
+ *
+ * @job: Job to be added
+ *
+ * Returns 0 if OK else negative error code
+ *
+ */
+int b2r2_core_job_add(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_wait() - Waits for an added job to be done.
+ *
+ * @job: Job to wait for
+ *
+ * Returns 0 if job done else negative error code
+ *
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_cancel() - Cancel an already added job.
+ *
+ * @job: Job to cancel
+ *
+ * Returns 0 if job cancelled or done else negative error code
+ *
+ */
+int b2r2_core_job_cancel(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_find() - Finds job with given job id
+ *
+ * Reference count will be increased for the found job
+ *
+ * @job_id: Job id to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find(int job_id);
+
+/**
+ * b2r2_core_job_find_first_with_tag() - Finds first job with given tag
+ *
+ * Reference count will be increased for the found job.
+ * This function can be used to find all jobs for a client, i.e.
+ * when cancelling all jobs for a client.
+ *
+ * @tag: Tag to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(int tag);
+
+/**
+ * b2r2_core_job_addref() - Increase the job reference count.
+ *
+ * @job: Job to increase reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller);
+
+/**
+ * b2r2_core_job_release() - Decrease the job reference count. The
+ * job will be released (the release() function
+ * will be called) when the reference count
+ * reaches zero.
+ *
+ * @job: Job to decrease reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller);
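+
+/*
+ * Illustrative usage sketch (not part of the driver itself): a minimal
+ * client-side job submission could look roughly like the code below. The
+ * my_callback()/my_release() helpers and the node addresses are assumptions
+ * made purely for the example; real clients also typically provide
+ * acquire_resources()/release_resources() as described above.
+ *
+ *   static void my_callback(struct b2r2_core_job *job) { }
+ *   static void my_release(struct b2r2_core_job *job) { }
+ *
+ *   job->prio = 0;
+ *   job->first_node_address = first_node_phys_addr;
+ *   job->last_node_address = last_node_phys_addr;
+ *   job->callback = my_callback;
+ *   job->release = my_release;
+ *
+ *   if (b2r2_core_job_add(job) == 0) {
+ *           b2r2_core_job_wait(job);
+ *           b2r2_core_job_release(job, __func__);
+ *   }
+ */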
+
+#endif /* !defined(__B2R2_CORE_H__) */
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
new file mode 100644
index 00000000000..d4711cd3e28
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+struct device *b2r2_log_dev;
+
+static struct dentry *root_dir;
+static struct dentry *log_lvl_dir;
+static struct dentry *stats_dir;
+
+#define CHARS_IN_NODE_DUMP 1544
+
+static const size_t dumped_node_size = CHARS_IN_NODE_DUMP * sizeof(char) + 1;
+
+static void dump_node(char *dst, struct b2r2_node *node)
+{
+ dst += sprintf(dst, "node 0x%08x ------------------\n",
+ (unsigned int)node);
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_NIP:", node->node.GROUP0.B2R2_NIP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CIC:", node->node.GROUP0.B2R2_CIC);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_INS:", node->node.GROUP0.B2R2_INS);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_ACK:", node->node.GROUP0.B2R2_ACK);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TBA:", node->node.GROUP1.B2R2_TBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TTY:", node->node.GROUP1.B2R2_TTY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TXY:", node->node.GROUP1.B2R2_TXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TSZ:", node->node.GROUP1.B2R2_TSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1CF:", node->node.GROUP2.B2R2_S1CF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2CF:", node->node.GROUP2.B2R2_S2CF);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1BA:", node->node.GROUP3.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1TY:", node->node.GROUP3.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1XY:", node->node.GROUP3.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1SZ:", node->node.GROUP3.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2BA:", node->node.GROUP4.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2TY:", node->node.GROUP4.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2XY:", node->node.GROUP4.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2SZ:", node->node.GROUP4.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3BA:", node->node.GROUP5.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3TY:", node->node.GROUP5.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3XY:", node->node.GROUP5.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3SZ:", node->node.GROUP5.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWO:", node->node.GROUP6.B2R2_CWO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWS:", node->node.GROUP6.B2R2_CWS);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CCO:", node->node.GROUP7.B2R2_CCO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CML:", node->node.GROUP7.B2R2_CML);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_PMK:", node->node.GROUP8.B2R2_PMK);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FCTL:", node->node.GROUP8.B2R2_FCTL);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RSF:", node->node.GROUP9.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RZI:", node->node.GROUP9.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_HFP:", node->node.GROUP9.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_VFP:", node->node.GROUP9.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RSF:", node->node.GROUP10.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RZI:", node->node.GROUP10.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_HFP:", node->node.GROUP10.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_VFP:", node->node.GROUP10.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF0:", node->node.GROUP11.B2R2_FF0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF1:", node->node.GROUP11.B2R2_FF1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF2:", node->node.GROUP11.B2R2_FF2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF3:", node->node.GROUP11.B2R2_FF3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY1:", node->node.GROUP12.B2R2_KEY1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY2:", node->node.GROUP12.B2R2_KEY2);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYL:", node->node.GROUP13.B2R2_XYL);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYP:", node->node.GROUP13.B2R2_XYP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_SAR:", node->node.GROUP14.B2R2_SAR);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_USR:", node->node.GROUP14.B2R2_USR);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX0:", node->node.GROUP15.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX1:", node->node.GROUP15.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX2:", node->node.GROUP15.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX3:", node->node.GROUP15.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX0:", node->node.GROUP16.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX1:", node->node.GROUP16.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX2:", node->node.GROUP16.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX3:", node->node.GROUP16.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+}
+
+struct mutex last_job_lock;
+
+static struct b2r2_node *last_job;
+
+void b2r2_debug_job_done(struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ struct b2r2_node **dst_node;
+ unsigned int node_count = 0;
+
+ while (node != NULL) {
+ node_count++;
+ node = node->next;
+ }
+
+ mutex_lock(&last_job_lock);
+
+ if (last_job) {
+ node = last_job;
+ while (node != NULL) {
+ struct b2r2_node *tmp = node->next;
+ kfree(node);
+ node = tmp;
+ }
+ last_job = NULL;
+ }
+
+ node = first_node;
+ dst_node = &last_job;
+ while (node != NULL) {
+ *dst_node = kzalloc(sizeof(**dst_node), GFP_KERNEL);
+ if (!(*dst_node))
+ goto last_job_alloc_failed;
+
+ memcpy(*dst_node, node, sizeof(**dst_node));
+
+ dst_node = &((*dst_node)->next);
+ node = node->next;
+ }
+
+ mutex_unlock(&last_job_lock);
+
+ return;
+
+last_job_alloc_failed:
+ /* Free the partially copied list while still holding the lock */
+ while (last_job != NULL) {
+ struct b2r2_node *tmp = last_job->next;
+ kfree(last_job);
+ last_job = tmp;
+ }
+
+ mutex_unlock(&last_job_lock);
+
+ return;
+}
+
+static char *last_job_chars;
+static int prev_node_count;
+
+static ssize_t last_job_read(struct file *filep, char __user *buf,
+ size_t bytes, loff_t *off)
+{
+ struct b2r2_node *node = last_job;
+ int node_count = 0;
+ int i;
+
+ size_t size;
+ size_t count;
+ loff_t offs = *off;
+
+ for (; node != NULL; node = node->next)
+ node_count++;
+
+ size = node_count * dumped_node_size;
+
+ if (node_count != prev_node_count) {
+ kfree(last_job_chars);
+
+ last_job_chars = kzalloc(size, GFP_KERNEL);
+ if (!last_job_chars)
+ return 0;
+ prev_node_count = node_count;
+ }
+
+ mutex_lock(&last_job_lock);
+ node = last_job;
+ for (i = 0; i < node_count; i++) {
+ BUG_ON(node == NULL);
+ dump_node(last_job_chars + i * dumped_node_size/sizeof(char),
+ node);
+ node = node->next;
+ }
+ mutex_unlock(&last_job_lock);
+
+ if (offs > size)
+ return 0;
+
+ if (offs + bytes > size)
+ count = size - offs;
+ else
+ count = bytes;
+
+ if (copy_to_user(buf, last_job_chars + offs, count))
+ return -EFAULT;
+
+ *off = offs + count;
+ return count;
+}
+
+static const struct file_operations last_job_fops = {
+ .read = last_job_read,
+};
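+
+/*
+ * Note (illustrative, assuming debugfs is mounted at /sys/kernel/debug):
+ * the node dump of the most recently finished job saved by
+ * b2r2_debug_job_done() can then be read from user space, e.g.:
+ *
+ *   cat /sys/kernel/debug/b2r2_debug/stats/last_job
+ */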
+
+int b2r2_debug_init(struct device *log_dev)
+{
+ int i;
+
+ b2r2_log_dev = log_dev;
+
+ for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
+ b2r2_log_levels[i] = 0;
+
+ root_dir = debugfs_create_dir("b2r2_debug", NULL);
+ if (!root_dir) {
+ b2r2_log_warn("%s: could not create root dir\n", __func__);
+ return -ENODEV;
+ }
+
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ /*
+ * If dynamic debug is disabled we need some other way to control the
+ * log prints
+ */
+ log_lvl_dir = debugfs_create_dir("logs", root_dir);
+
+ /* No need to save the files, they will be removed recursively */
+ (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
+ (void)debugfs_create_bool("info", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
+ (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
+ (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
+
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+ /* log_lvl_dir is never used */
+ (void)log_lvl_dir;
+#endif
+
+ stats_dir = debugfs_create_dir("stats", root_dir);
+ (void)debugfs_create_file("last_job", 0444, stats_dir, NULL,
+ &last_job_fops);
+
+ mutex_init(&last_job_lock);
+
+ return 0;
+}
+
+void b2r2_debug_exit(void)
+{
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ if (root_dir)
+ debugfs_remove_recursive(root_dir);
+#endif
+}
diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h
new file mode 100644
index 00000000000..f87ca728482
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+
+#include <linux/device.h>
+
+#include "b2r2_internal.h"
+
+#ifdef CONFIG_B2R2_DEBUG
+
+/* Log macros */
+enum b2r2_log_levels {
+ B2R2_LOG_LEVEL_WARN,
+ B2R2_LOG_LEVEL_INFO,
+ B2R2_LOG_LEVEL_DEBUG,
+ B2R2_LOG_LEVEL_REGDUMP,
+ B2R2_LOG_LEVEL_COUNT,
+};
+
+/*
+ * Booleans controlling the different log levels. The different log levels are
+ * enabled separately (i.e. you can have info prints without the warn prints).
+ */
+extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+
+extern struct device *b2r2_log_dev;
+
+#define b2r2_log_err(...) do { \
+ dev_err(b2r2_log_dev, __VA_ARGS__); \
+ } while (0)
+
+/* If dynamic debug is enabled it should be used instead of loglevels */
+#ifdef CONFIG_DYNAMIC_DEBUG
+# define b2r2_log_warn(...) do { \
+ dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(...) do { \
+ dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(...) do { \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(...) do { \
+ dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#else
+# define b2r2_log_warn(...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \
+ dev_warn(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \
+ dev_info(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \
+ dev_vdbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#endif
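+
+/*
+ * When dynamic debug is not available, the log levels above are controlled
+ * via the debugfs booleans created in b2r2_debug_init(). Assuming debugfs
+ * is mounted at the usual /sys/kernel/debug, the prints can be enabled
+ * with, for example:
+ *
+ *   echo 1 > /sys/kernel/debug/b2r2_debug/logs/info
+ *   echo 1 > /sys/kernel/debug/b2r2_debug/logs/regdumps
+ */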
+
+
+int b2r2_debug_init(struct device *log_dev);
+void b2r2_debug_exit(void);
+
+void b2r2_debug_job_done(struct b2r2_node *node);
+
+#else
+
+#define b2r2_log_err(...)
+#define b2r2_log_warn(...)
+#define b2r2_log_info(...)
+#define b2r2_log_debug(...)
+#define b2r2_log_regdump(...)
+
+static inline int b2r2_debug_init(struct device *log_dev)
+{
+ return 0;
+}
+static inline void b2r2_debug_exit(void)
+{
+ return;
+}
+static inline void b2r2_debug_job_done(struct b2r2_node *node)
+{
+ return;
+}
+
+#endif
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_filters.c b/drivers/video/b2r2/b2r2_filters.c
new file mode 100644
index 00000000000..208cdcd286e
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include "b2r2_filters.h"
+#include "b2r2_internal.h"
+
+/**
+ * struct b2r2_filter_spec filters[] - Filter lookup table
+ *
+ * Lookup table for filters for different scale factors. A filter
+ * will be selected according to "min < scale_factor <= max".
+ */
+static struct b2r2_filter_spec filters[] = {
+ {
+ .min = 1024,
+ .max = 1433,
+ .h_coeffs = {
+ 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
+ 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
+ 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
+ 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
+ 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
+ 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
+ 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
+ 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
+ },
+ .v_coeffs = {
+ 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
+ 0xf6, 0x12, 0x3b, 0x02, 0xfb,
+ 0xf4, 0x1b, 0x35, 0xfd, 0xff,
+ 0xf4, 0x23, 0x30, 0xf8, 0x01,
+ 0xf6, 0x29, 0x27, 0xf6, 0x04,
+ 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
+ 0xfd, 0x31, 0x16, 0xf6, 0x06,
+ 0x02, 0x32, 0x0d, 0xf8, 0x07
+ },
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .h_coeffs = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ },
+ .v_coeffs = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ },
+ },
+ {
+ .min = 1536,
+ .max = 3072,
+ .h_coeffs = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ },
+ .v_coeffs = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ },
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .h_coeffs = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ },
+ .v_coeffs = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ },
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .h_coeffs = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ },
+ .v_coeffs = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ },
+ },
+};
+static const size_t filters_size = ARRAY_SIZE(filters);
+
+/**
+ * struct b2r2_filter_spec bilinear_filter - A bilinear filter
+ *
+ * The bilinear filter will be used if no custom filters are specified, or
+ * for upscales not matching any filter in the lookup table.
+ */
+static struct b2r2_filter_spec bilinear_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ },
+ .v_coeffs = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0xfd, 0x09, 0x3c, 0xfa, 0x04,
+ 0xf9, 0x13, 0x39, 0xf5, 0x06,
+ 0xf5, 0x1f, 0x31, 0xf3, 0x08,
+ 0xf3, 0x2a, 0x28, 0xf3, 0x08,
+ 0xf3, 0x34, 0x1d, 0xf5, 0x07,
+ 0xf5, 0x3b, 0x12, 0xf9, 0x05,
+ 0xfa, 0x3f, 0x07, 0xfd, 0x03
+ },
+};
+
+/**
+ * struct b2r2_filter_spec default_downscale_filter - Default filter for downscale
+ *
+ * The default downscale filter will be used for downscales not matching any
+ * filter in the lookup table.
+ */
+static struct b2r2_filter_spec default_downscale_filter = {
+ .min = 1 << 10,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ },
+ .v_coeffs = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ },
+};
+
+/**
+ * struct b2r2_filter_spec blur_filter - Blur filter
+ *
+ * Filter for blurring an image.
+ */
+static struct b2r2_filter_spec blur_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08
+ },
+ .v_coeffs = {
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c
+ },
+};
+
+/* Private function declarations */
+static int alloc_filter_coeffs(struct b2r2_filter_spec *filter);
+static void free_filter_coeffs(struct b2r2_filter_spec *filter);
+
+/* Public functions */
+
+static int filters_initialized;
+
+int b2r2_filters_init(void)
+{
+ int i;
+
+ if (filters_initialized)
+ return 0;
+
+ for (i = 0; i < filters_size; i++) {
+ alloc_filter_coeffs(&filters[i]);
+ }
+
+ alloc_filter_coeffs(&bilinear_filter);
+ alloc_filter_coeffs(&default_downscale_filter);
+ alloc_filter_coeffs(&blur_filter);
+
+ filters_initialized = 1;
+
+ return 0;
+}
+
+void b2r2_filters_exit(void)
+{
+ int i;
+
+ if (!filters_initialized)
+ return;
+
+ for (i = 0; i < filters_size; i++) {
+ free_filter_coeffs(&filters[i]);
+ }
+
+ free_filter_coeffs(&bilinear_filter);
+ free_filter_coeffs(&default_downscale_filter);
+ free_filter_coeffs(&blur_filter);
+
+ filters_initialized = 0;
+}
+
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor)
+{
+ int i;
+ struct b2r2_filter_spec *filter = NULL;
+
+ for (i = 0; i < filters_size; i++) {
+ if ((filters[i].min < scale_factor) &&
+ (scale_factor <= filters[i].max) &&
+ filters[i].h_coeffs_dma_addr &&
+ filters[i].v_coeffs_dma_addr) {
+ filter = &filters[i];
+ break;
+ }
+ }
+
+ if (filter == NULL) {
+ /*
+ * No suitable filter has been found. Use default filters,
+ * bilinear for any upscale.
+ */
+ if (scale_factor < (1 << 10))
+ filter = &bilinear_filter;
+ else
+ filter = &default_downscale_filter;
+ }
+
+ /*
+ * Check so that the coefficients were successfully allocated for this
+ * filter.
+ */
+ if (!filter->h_coeffs_dma_addr || !filter->v_coeffs_dma_addr)
+ return NULL;
+ else
+ return filter;
+}
+
+struct b2r2_filter_spec *b2r2_filter_blur(void)
+{
+ return &blur_filter;
+}
+
+/* Private functions */
+static int alloc_filter_coeffs(struct b2r2_filter_spec *filter)
+{
+ int ret;
+
+ filter->h_coeffs_dma_addr = dma_alloc_coherent(b2r2_blt_device(),
+ B2R2_HF_TABLE_SIZE, &(filter->h_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->h_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ filter->v_coeffs_dma_addr = dma_alloc_coherent(b2r2_blt_device(),
+ B2R2_VF_TABLE_SIZE, &(filter->v_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->v_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs, B2R2_HF_TABLE_SIZE);
+ memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs, B2R2_VF_TABLE_SIZE);
+
+ return 0;
+
+error:
+ free_filter_coeffs(filter);
+ return ret;
+
+}
+
+static void free_filter_coeffs(struct b2r2_filter_spec *filter)
+{
+ if (filter->h_coeffs_dma_addr != NULL)
+ dma_free_coherent(b2r2_blt_device(), B2R2_HF_TABLE_SIZE,
+ filter->h_coeffs_dma_addr,
+ filter->h_coeffs_phys_addr);
+ if (filter->v_coeffs_dma_addr != NULL)
+ dma_free_coherent(b2r2_blt_device(), B2R2_VF_TABLE_SIZE,
+ filter->v_coeffs_dma_addr,
+ filter->v_coeffs_phys_addr);
+
+ filter->h_coeffs_dma_addr = NULL;
+ filter->h_coeffs_phys_addr = 0;
+ filter->v_coeffs_dma_addr = NULL;
+ filter->v_coeffs_phys_addr = 0;
+}
diff --git a/drivers/video/b2r2/b2r2_filters.h b/drivers/video/b2r2/b2r2_filters.h
new file mode 100644
index 00000000000..0eeefc6b0e0
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_FILTERS_H
+#define _LINUX_VIDEO_B2R2_FILTERS_H
+
+#include <linux/kernel.h>
+
+#define B2R2_HF_TABLE_SIZE 64
+#define B2R2_VF_TABLE_SIZE 40
+
+/**
+ * struct b2r2_filter_spec - Filter specification structure
+ *
+ * @min: Minimum scale factor for this filter (in 6.10 fixed point)
+ * @max: Maximum scale factor for this filter (in 6.10 fixed point)
+ * @h_coeffs: Horizontal filter coefficients
+ * @v_coeffs: Vertical filter coefficients
+ * @h_coeffs_dma_addr: Virtual address of the coherent buffer holding the
+ * horizontal coefficients
+ * @v_coeffs_dma_addr: Virtual address of the coherent buffer holding the
+ * vertical coefficients
+ * @h_coeffs_phys_addr: Physical address of the horizontal coefficients
+ * @v_coeffs_phys_addr: Physical address of the vertical coefficients
+ */
+struct b2r2_filter_spec {
+ const u16 min;
+ const u16 max;
+
+ const u8 h_coeffs[B2R2_HF_TABLE_SIZE];
+ const u8 v_coeffs[B2R2_VF_TABLE_SIZE];
+
+ void *h_coeffs_dma_addr;
+ u32 h_coeffs_phys_addr;
+
+ void *v_coeffs_dma_addr;
+ u32 v_coeffs_phys_addr;
+};
+
+/**
+ * b2r2_filters_init() - Initializes the B2R2 filters
+ */
+int b2r2_filters_init(void);
+
+/**
+ * b2r2_filters_exit() - De-initializes the B2R2 filters
+ */
+void b2r2_filters_exit(void);
+
+/**
+ * b2r2_filter_find() - Find a filter matching the given scale factor
+ *
+ * @scale_factor: Scale factor to find a filter for (6.10 fixed point)
+ *
+ * Returns NULL if no filter could be found.
+ */
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor);
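+
+/*
+ * Illustrative example (values taken from the filter table in
+ * b2r2_filters.c): scale factors are in 6.10 fixed point, so 1.0 is
+ * 1 << 10 = 1024 and a downscale by a factor of two is 2 << 10 = 2048,
+ * which falls in the 1536 < scale_factor <= 3072 filter range:
+ *
+ *   struct b2r2_filter_spec *filter = b2r2_filter_find(2 << 10);
+ */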
+
+/**
+ * b2r2_filter_blur() - Returns the blur filter
+ *
+ * Returns NULL if no blur filter is available.
+ */
+struct b2r2_filter_spec *b2r2_filter_blur(void);
+
+#endif /* _LINUX_VIDEO_B2R2_FILTERS_H */
diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c
new file mode 100644
index 00000000000..5941e39be91
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.c
@@ -0,0 +1,3303 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path. Full coverage of the user interface but
+ * a non-optimized implementation, used for fallback purposes.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include "b2r2_generic.h"
+#include "b2r2_internal.h"
+#include "b2r2_global.h"
+#include "b2r2_debug.h"
+#include "b2r2_filters.h"
+
+/*
+ * Debug printing
+ */
+#define B2R2_GENERIC_DEBUG_AREAS 0
+#define B2R2_GENERIC_DEBUG
+
+#define B2R2_GENERIC_WORK_BUF_WIDTH 16
+#define B2R2_GENERIC_WORK_BUF_HEIGHT 16
+#define B2R2_GENERIC_WORK_BUF_PITCH (16 * 4)
+#define B2R2_GENERIC_WORK_BUF_FMT B2R2_NATIVE_ARGB8888
+
+/*
+ * Private functions
+ */
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_node *node)
+{
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ while (node != NULL) {
+ memset(&(node->node), 0, sizeof(node->node));
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7fffc;
+
+ if (node->next == NULL)
+ break;
+
+ node->node.GROUP0.B2R2_NIP = node->next->physical_address;
+
+ node = node->next;
+ }
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
+/**
+ * dump_nodes() - prints the node list
+ */
+static void dump_nodes(struct b2r2_node *first, bool dump_all)
+{
+ struct b2r2_node *node = first;
+ b2r2_log_info("%s ENTRY\n", __func__);
+ do {
+ b2r2_log_debug("\nNODE START:\n=============\n");
+ b2r2_log_debug("B2R2_ACK: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_ACK);
+ b2r2_log_debug("B2R2_INS: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_INS);
+ b2r2_log_debug("B2R2_CIC: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_CIC);
+ b2r2_log_debug("B2R2_NIP: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_NIP);
+
+ b2r2_log_debug("B2R2_TSZ: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TSZ);
+ b2r2_log_debug("B2R2_TXY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TXY);
+ b2r2_log_debug("B2R2_TTY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TTY);
+ b2r2_log_debug("B2R2_TBA: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TBA);
+
+ b2r2_log_debug("B2R2_S2CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S2CF);
+ b2r2_log_debug("B2R2_S1CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S1CF);
+
+ b2r2_log_debug("B2R2_S1SZ: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SSZ);
+ b2r2_log_debug("B2R2_S1XY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SXY);
+ b2r2_log_debug("B2R2_S1TY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_STY);
+ b2r2_log_debug("B2R2_S1BA: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SBA);
+
+ b2r2_log_debug("B2R2_S2SZ: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SSZ);
+ b2r2_log_debug("B2R2_S2XY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SXY);
+ b2r2_log_debug("B2R2_S2TY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_STY);
+ b2r2_log_debug("B2R2_S2BA: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SBA);
+
+ b2r2_log_debug("B2R2_S3SZ: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SSZ);
+ b2r2_log_debug("B2R2_S3XY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SXY);
+ b2r2_log_debug("B2R2_S3TY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_STY);
+ b2r2_log_debug("B2R2_S3BA: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SBA);
+
+ b2r2_log_debug("B2R2_CWS: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWS);
+ b2r2_log_debug("B2R2_CWO: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWO);
+
+ b2r2_log_debug("B2R2_FCTL: \t0x%.8x\n",
+ node->node.GROUP8.B2R2_FCTL);
+ b2r2_log_debug("B2R2_RSF: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RSF);
+ b2r2_log_debug("B2R2_RZI: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RZI);
+ b2r2_log_debug("B2R2_HFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_HFP);
+ b2r2_log_debug("B2R2_VFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_VFP);
+ b2r2_log_debug("B2R2_LUMA_RSF: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RSF);
+ b2r2_log_debug("B2R2_LUMA_RZI: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RZI);
+ b2r2_log_debug("B2R2_LUMA_HFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_HFP);
+ b2r2_log_debug("B2R2_LUMA_VFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_VFP);
+
+
+ b2r2_log_debug("B2R2_IVMX0: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX0);
+ b2r2_log_debug("B2R2_IVMX1: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX1);
+ b2r2_log_debug("B2R2_IVMX2: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX2);
+ b2r2_log_debug("B2R2_IVMX3: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX3);
+ b2r2_log_debug("\n=============\nNODE END\n");
+
+ node = node->next;
+ } while (node != NULL && dump_all);
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static inline enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt)
+{
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
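+ * @fmt: blitter API format to check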
+ */
+static inline enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ break;
+ }
+
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+}
+
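+/**
+ * get_pitch() - returns the pitch in bytes for the given format and width,
+ * or 0 if the width is not legal for the format.
+ */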
+static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
+{
+ switch (format) {
+ case B2R2_BLT_FMT_1_BIT_A1: {
+ int pitch = width >> 3;
+ /* Check for remainder */
+ if (width & 7)
+ pitch++;
+ return pitch;
+ }
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return width;
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* all 16 bits/pixel RGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return width * 2;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* all 24 bits/pixel raster formats */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* all 32 bits/pixel formats */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return width * 4;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* width of the buffer must be a multiple of 4 */
+ if (width & 3) {
+ b2r2_log_warn("%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ return width * 2;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+ /*
+ * The following planar and semi-planar formats all share
+ * the same pitch handling (pitch of the Y-buffer).
+ */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ /* width of the buffer must be a multiple of 2 */
+ if (width & 1) {
+ b2r2_log_warn("%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* width of the buffer must be a multiple of 16. */
+ if (width & 15) {
+ b2r2_log_warn("%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ default:
+ b2r2_log_warn("%s: Unable to determine pitch "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+}
+
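+/**
+ * validate_buf() - checks that the buffer is large enough to hold
+ * an image of the given format and dimensions.
+ */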
+static s32 validate_buf(const struct b2r2_blt_img *image,
+ const struct b2r2_resolved_buf *buf)
+{
+ u32 expect_buf_size;
+ u32 pitch;
+
+ if (image->width <= 0 || image->height <= 0) {
+ b2r2_log_warn("%s: width=%d or height=%d negative.\n", __func__,
+ image->width, image->height);
+ return -EINVAL;
+ }
+
+ if (image->pitch == 0) {
+ /* autodetect pitch based on format and width */
+ pitch = get_pitch(image->fmt, image->width);
+ } else {
+ pitch = image->pitch;
+ }
+
+ expect_buf_size = pitch * image->height;
+
+ if (pitch == 0) {
+ b2r2_log_warn("%s: Unable to detect pitch. "
+ "fmt=%#010x, width=%d\n",
+ __func__,
+ image->fmt, image->width);
+ return -EINVAL;
+ }
+
+ /* format specific adjustments */
+ switch (image->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ /*
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size +=
+ (pitch >> 1) * ((image->height + 1) >> 1) * 2;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ expect_buf_size += (pitch >> 1) * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ expect_buf_size += pitch * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size += pitch * ((image->height + 1) >> 1);
+ break;
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ */
+ expect_buf_size += pitch * image->height;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn("%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * (image->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn("%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * image->height;
+ break;
+ default:
+ break;
+ }
+
+ if (buf->file_len < expect_buf_size) {
+ b2r2_log_warn("%s: Invalid buffer size:\n"
+ "fmt=%#010x w=%d h=%d buf.len=%d expect_buf_size=%d\n",
+ __func__,
+ image->fmt, image->width, image->height, buf->file_len,
+ expect_buf_size);
+ return -EINVAL;
+ }
+
+ if (image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
+ b2r2_log_warn("%s: Virtual pointers not supported yet.\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
+ */
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
+
+
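+/**
+ * setup_fill_input_stage() - sets up a solid color fill on the SRC2
+ * channel, writing the color to the intermediate work buffer.
+ */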
+static void setup_fill_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ enum b2r2_native_fmt fill_fmt = 0;
+ u32 src_color = req->user_req.src_color;
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ /* Determine format in src_color */
+ switch (dst_img->fmt) {
+ /* ARGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ } else {
+ /* SOURCE_FILL_RAW */
+ fill_fmt = to_native_fmt(dst_img->fmt);
+ if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
+ /*
+ * Color is read from a register,
+ * where it is stored in ABGR format.
+ * Set up IVMX.
+ */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_BGR;
+ }
+ }
+ break;
+ /* YUV formats */
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ * Format of the supplied src_color is
+ * B2R2_BLT_FMT_32_BIT_AYUV8888.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+ } else {
+ /* SOURCE_FILL_RAW */
+ bool dst_yuv_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV444_PACKED_PLANAR ==
+ dst_img->fmt;
+
+ bool dst_yuv_semi_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt;
+
+ if (dst_yuv_planar || dst_yuv_semi_planar) {
+ /*
+ * SOURCE_FILL_RAW cannot be supported
+ * with multi-buffer formats.
+ * Force a legal format to prevent B2R2
+ * from misbehaving.
+ */
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ } else {
+ fill_fmt = to_native_fmt(dst_img->fmt);
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+ /*
+ * Re-arrange the color components from
+ * VUY(A) to (A)YUV
+ */
+ if (dst_img->fmt ==
+ B2R2_BLT_FMT_24_BIT_VUY888) {
+ u32 Y = src_color & 0xff;
+ u32 U = src_color & 0xff00;
+ u32 V = src_color & 0xff0000;
+ src_color = (Y << 16) | U | (V >> 16);
+ } else if (dst_img->fmt ==
+ B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ u32 A = src_color & 0xff;
+ u32 Y = src_color & 0xff00;
+ u32 U = src_color & 0xff0000;
+ u32 V = src_color & 0xff000000;
+ src_color = (A << 24) |
+ (Y << 8) |
+ (U >> 8) |
+ (V >> 24);
+ }
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to
+ * RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ default:
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ }
+ }
+ break;
+ default:
+ src_color = 0;
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ /* Set color fill on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = 0;
+ node->node.GROUP4.B2R2_STY =
+ (0 << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ fill_fmt |
+ get_alpha_range(dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL;
+ node->node.GROUP2.B2R2_S2CF = src_color;
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
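+/**
+ * setup_input_stage() - reads and rescales the source image into the
+ * intermediate work buffer, or delegates to the fill stage for
+ * SOURCE_FILL/SOURCE_FILL_RAW requests.
+ */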
+static void setup_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_img *src_img = &(req->user_req.src_img);
+ u32 src_pitch = 0;
+ /* horizontal and vertical scan order for out_buf */
+ enum b2r2_ty dst_hso = B2R2_TY_HSO_LEFT_TO_RIGHT;
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ u32 endianness = 0;
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ bool yuv_semi_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ struct b2r2_filter_spec *hf;
+ struct b2r2_filter_spec *vf;
+
+ bool use_h_filter = false;
+ bool use_v_filter = false;
+
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ if (((B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW) &
+ req->user_req.flags) != 0) {
+ setup_fill_input_stage(req, node, out_buf);
+ b2r2_log_info("%s DONE\n", __func__);
+ return;
+ }
+
+ if (src_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ src_pitch = get_pitch(src_img->fmt, src_img->width);
+ } else {
+ src_pitch = src_img->pitch;
+ }
+
+ b2r2_log_info("%s transform=%#010x\n",
+ __func__, req->user_req.transform);
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
+
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+
+ use_h_filter = h_scf != (1 << 10);
+ use_v_filter = v_scf != (1 << 10);
+
+ /* B2R2_BLT_FLAG_BLUR overrides any scaling filter. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ /* Configure horizontal rescale */
+ if (h_scf != (1 << 10)) {
+ b2r2_log_info("%s: Scaling horizontally by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ h_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ /* Configure vertical rescale */
+ if (v_scf != (1 << 10)) {
+ b2r2_log_info("%s: Scaling vertically by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ v_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+
+ /* Adjustments that depend on the source format */
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+ * For B2R2_BLT_FMT_24_BIT_YUV888 and
+ * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (src_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /*
+ * Luma is handled in the same way
+ * for all YUV multi-buffer formats.
+ * Set the luma rescale registers.
+ */
+ u32 rsf_luma = 0;
+ u32 rzi_luma = 0;
+
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_IVMX | B2R2_CIC_RESIZE_LUMA;
+
+ if (src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ }
+
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ rsf_luma |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi_luma |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ rsf_luma |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi_luma |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP10.B2R2_RSF = rsf_luma;
+ node->node.GROUP10.B2R2_RZI = rzi_luma;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (v_scf >> 1) << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ vf = b2r2_filter_find(v_scf >> 1);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is half the luminance size
+ * in the horizontal direction only.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ use_h_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance is the same size as luminance.*/
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * Set the filter control and rescale registers.
+ * The GROUP9 registers are used for all single-buffer formats
+ * and for chroma in the case of multi-buffer YUV formats.
+ * hf/vf have been selected appropriately for chroma scaling,
+ * whether the format is multi-buffer YUV or single-buffer raster.
+ * B2R2_BLT_FLAG_BLUR overrides any scaling filter.
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI |= rzi;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * Flip transform is done before potential rotation.
+ * This can be achieved with appropriate scan order.
+ * Transform stage will only do rotation.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ dst_hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ dst_hso | dst_vso;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt src_fmt = to_native_fmt(src_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ else
+ cb_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (src_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (src_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ src_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ src_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance has full resolution, same as luminance.*/
+ chroma_pitch = src_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * src_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ u32 chroma_pitch = src_pitch;
+
+ enum b2r2_native_fmt src_fmt = to_native_fmt(src_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(src_img->fmt) |
+ get_alpha_range(src_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ if ((req->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO = B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = req->clut_phys_addr;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
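+/**
+ * setup_transform_stage() - rotates the intermediate work buffer.
+ * Flips are handled earlier, in the input stage, via scan order.
+ */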
+static void setup_transform_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf)
+{
+ /* vertical scan order for out_buf */
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ enum b2r2_blt_transform transform = req->user_req.transform;
+
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Scan order must be flipped, otherwise the contents
+ * would be mirrored vertically: the leftmost column of
+ * in_buf would become the top row of out_buf instead
+ * of the bottom row.
+ */
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT | dst_vso;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
+/*
+static void setup_mask_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf);
+*/
+
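+/**
+ * setup_dst_read_stage() - reads the destination image into an
+ * intermediate work buffer.
+ */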
+static void setup_dst_read_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 endianness = 0;
+ bool yuv_semi_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ u32 dst_pitch = 0;
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(dst_img->fmt, dst_img->width);
+ } else {
+ dst_pitch = dst_img->pitch;
+ }
+
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ /* Adjustments that depend on the destination format */
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped
+ * so it is YVU and not YUV.
+ */
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+ * For B2R2_BLT_FMT_24_BIT_YUV888 and
+ * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (dst_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ if (dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is half the luminance size
+ * in the horizontal direction only.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance is the same size as luminance.*/
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ default:
+ break;
+ }
+ /* Set the filter control and rescale registers for chroma */
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL | B2R2_CIC_RESIZE_CHROMA;
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(dst_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(dst_img->fmt) |
+ get_alpha_range(dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
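+/**
+ * setup_blend_stage() - blends the foreground work buffer with the
+ * background work buffer, or simply copies the foreground when no
+ * blending is requested.
+ */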
+static void setup_blend_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *bg_buf,
+ struct b2r2_work_buf *fg_buf)
+{
+ u32 global_alpha = req->user_req.global_alpha;
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ node->node.GROUP0.B2R2_ACK = 0;
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)) {
+ /* Some kind of blending needs to be done. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT)
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /*
+ * global_alpha register accepts 0..128 range,
+ * global_alpha in the request is 0..255, remap needed.
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+ if (global_alpha == 255)
+ global_alpha = 128;
+ else
+ global_alpha >>= 1;
+ } else {
+ /*
+ * Use solid global_alpha
+ * if global alpha blending is not set.
+ */
+ global_alpha = 128;
+ }
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << (B2R2_ACK_GALPHA_ROPID_SHIFT);
+
+ /* Set background on SRC1 channel */
+ node->node.GROUP3.B2R2_SBA = bg_buf->phys_addr;
+ node->node.GROUP3.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set foreground on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2;
+ } else {
+ /*
+ * No blending, foreground goes on SRC2. No global alpha.
+ * EMACSOC TODO: The blending stage should be skipped altogether
+ * if no blending is to be done. Probably could go directly from
+ * transform to writeback.
+ */
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ }
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
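+/**
+ * setup_writeback_stage() - writes the intermediate work buffer to the
+ * destination, using one node for single-buffer destinations, two for
+ * semi-planar YUV and three for planar YUV.
+ */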
+static void setup_writeback_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *in_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ const enum b2r2_blt_fmt dst_fmt = dst_img->fmt;
+ const bool yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ const bool yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ const u32 group4_b2r2_sty =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ u32 dst_dither = 0;
+ u32 dst_pitch = 0;
+ u32 endianness = 0;
+
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(dst_img->fmt, dst_img->width);
+ } else {
+ dst_pitch = dst_img->pitch;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DITHER) != 0)
+ dst_dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ /* Set target buffer(s) */
+ if (yuv_planar_dst) {
+ /*
+ * Three nodes are required to write the output:
+ * luma, blue chroma and red chroma.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED |
+ B2R2_INS_IVMX_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW |
+ B2R2_CIC_IVMX;
+
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ /*
+ * YUV422 or YVU422:
+ * Chrominance is half the luminance size
+ * in the horizontal direction only.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ /*
+ * No scaling required since
+ * chrominance is not subsampled.
+ */
+ break;
+ default:
+ break;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Blue chroma (U-component)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cb_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+
+ /*
+ * Red chroma (V-component)
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it were
+ * CR_NOT_CB.
+ */
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cr_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TTY_CB_NOT_CR |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else if (yuv_semi_planar_dst) {
+ /*
+ * Two nodes are required to write the output:
+ * one for luma and one for the interleaved chroma
+ * components.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED |
+ B2R2_INS_IVMX_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW |
+ B2R2_CIC_IVMX;
+
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(dst_img->fmt);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR) {
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ } else {
+ /*
+ * YUV422
+ * Chrominance is always half the luminance size
+			 * only in the horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+ }
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Chroma (UV-components)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = chroma_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+ }
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS =
+ group0_b2r2_ins | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic |
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else {
+ /* single buffer target */
+
+ /* Set up OVMX */
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO;
+
+ /*
+ * Re-arrange color components from (A)YUV to VUY(A)
+ * when bytes are stored in memory.
+ */
+ if (dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(dst_img->fmt) |
+ get_alpha_range(dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ endianness;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_CLIP_WINDOW;
+
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(req->user_req.src_color,
+ req->user_req.src_img.fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ }
+ /*
+ * Writeback is the last stage. Terminate the program chain
+ * to prevent out-of-control B2R2 execution.
+ */
+ node->node.GROUP0.B2R2_NIP = 0;
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
+
+/*
+ * Public functions
+ */
+void b2r2_generic_init(void)
+{
+ b2r2_filters_init();
+}
+
+void b2r2_generic_exit(void)
+{
+ b2r2_filters_exit();
+}
+
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count)
+{
+ /*
+	 * Need at least 4 nodes: read or fill input, read dst, blend,
+	 * and write back the result.
+	 */
+ u32 n_nodes = 4;
+ /* Need at least 2 bufs, 1 for blend output and 1 for input */
+ u32 n_work_bufs = 2;
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ enum b2r2_blt_fmt dst_fmt = 0;
+ bool is_src_fill = false;
+ bool yuv_planar_dst;
+ bool yuv_semi_planar_dst;
+
+ struct b2r2_blt_rect src_rect;
+ struct b2r2_blt_rect dst_rect;
+
+ if (req == NULL || work_buf_width == NULL || work_buf_height == NULL ||
+ work_buf_count == NULL || node_count == NULL) {
+ b2r2_log_warn("%s: Invalid in or out pointers:\n"
+ "req=0x%p\n"
+ "work_buf_width=0x%p work_buf_height=0x%p "
+ "work_buf_count=0x%p\n"
+ "node_count=0x%p.\n",
+ __func__,
+ req,
+ work_buf_width, work_buf_height,
+ work_buf_count,
+ node_count);
+ return -EINVAL;
+ }
+
+ dst_fmt = req->user_req.dst_img.fmt;
+
+ is_src_fill = (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+ yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ *node_count = 0;
+ *work_buf_width = 0;
+ *work_buf_height = 0;
+ *work_buf_count = 0;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ n_nodes++;
+ n_work_bufs++;
+ }
+
+ if ((yuv_planar_dst || yuv_semi_planar_dst) &&
+ (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ b2r2_log_warn("%s: Invalid combination: source_fill_raw"
+ " and multi-buffer destination.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 &&
+ (req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY)) {
+ b2r2_log_warn("%s: Invalid combination: source and "
+ "destination color keying.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
+ B2R2_BLT_FLAG_DEST_COLOR_KEY))) {
+ b2r2_log_warn("%s: Invalid combination: "
+ "source_fill and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn("%s: Invalid combination: "
+ "blending and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn("%s: Invalid combination: source mask and "
+ "color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+		b2r2_log_warn("%s: Unsupported: source mask or "
+				"destination color keying.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+	if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) {
+ enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_src =
+ src_fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ if (yuv_src || src_fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ src_fmt == B2R2_BLT_FMT_8_BIT_A8) {
+ b2r2_log_warn("%s: Unsupported: source color keying "
+ "with YUV or pure alpha formats.\n",
+ __func__);
+ return -ENOSYS;
+ }
+ }
+
+ /* Check for invalid dimensions that would hinder scale calculations */
+ src_rect = req->user_req.src_rect;
+ dst_rect = req->user_req.dst_rect;
+ /* Check for invalid src_rect unless src_fill is enabled */
+ if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 ||
+ src_rect.x + src_rect.width > req->user_req.src_img.width ||
+ src_rect.y + src_rect.height > req->user_req.src_img.height)) {
+ b2r2_log_warn("%s: src_rect outside src_img:\n"
+ "src(x,y,w,h)=(%d, %d, %d, %d) "
+ "src_img(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.x, src_rect.y, src_rect.width, src_rect.height,
+ req->user_req.src_img.width,
+ req->user_req.src_img.height);
+ return -EINVAL;
+ }
+
+ if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) {
+ b2r2_log_warn("%s: Invalid source dimensions:\n"
+ "src(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.width, src_rect.height);
+ return -EINVAL;
+ }
+
+ if (dst_rect.width <= 0 || dst_rect.height <= 0) {
+ b2r2_log_warn("%s: Invalid dest dimensions:\n"
+ "dst(w,h)=(%d, %d).\n",
+ __func__,
+ dst_rect.width, dst_rect.height);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn("%s: Invalid request: no table specified "
+ "for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for invalid image params */
+ if (!is_src_fill && validate_buf(&(req->user_req.src_img),
+ &(req->src_resolved)))
+ return -EINVAL;
+
+ if (validate_buf(&(req->user_req.dst_img), &(req->dst_resolved)))
+ return -EINVAL;
+
+ if (is_src_fill) {
+ /*
+ * Params correct for a source fill operation.
+ * No need for further checking.
+ */
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info("%s DONE buf_w=%d buf_h=%d buf_count=%d "
+ "node_count=%d\n",
+ __func__,
+ *work_buf_width, *work_buf_height,
+ *work_buf_count, *node_count);
+ return 0;
+ }
+
+ /*
+	 * Calculate scaling factors. All transform enum values
+ * that include rotation have the CCW_ROT_90 bit set.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect.width << 10) / dst_rect.height;
+ v_scf = (src_rect.height << 10) / dst_rect.width;
+ } else {
+ h_scf = (src_rect.width << 10) / dst_rect.width;
+ v_scf = (src_rect.height << 10) / dst_rect.height;
+ }
+
+ /* Check for degenerate/out_of_range scaling factors. */
+ if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) {
+ b2r2_log_warn("%s: Dimensions result in degenerate or "
+ "out of range scaling:\n"
+ "src(w,h)=(%d, %d) "
+ "dst(w,h)=(%d,%d).\n"
+ "h_scf=0x%.8x, v_scf=0x%.8x\n",
+ __func__,
+ src_rect.width, src_rect.height,
+ dst_rect.width, dst_rect.height,
+ h_scf, v_scf);
+ return -EINVAL;
+ }
+
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info("%s DONE buf_w=%d buf_h=%d buf_count=%d node_count=%d\n",
+ __func__,
+ *work_buf_width, *work_buf_height, *work_buf_count,
+ *node_count);
+ return 0;
+}
+
+/*
+ * Configure the node list for all stages of the generic path.
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count)
+{
+ struct b2r2_node *node = NULL;
+ struct b2r2_work_buf *in_buf = NULL;
+ struct b2r2_work_buf *out_buf = NULL;
+ struct b2r2_work_buf *empty_buf = NULL;
+
+#ifdef B2R2_GENERIC_DEBUG
+ u32 needed_bufs = 0;
+ u32 needed_nodes = 0;
+ s32 work_buf_width = 0;
+ s32 work_buf_height = 0;
+ u32 n_nodes = 0;
+ int invalid_req = b2r2_generic_analyze(req, &work_buf_width,
+ &work_buf_height, &needed_bufs,
+ &needed_nodes);
+ if (invalid_req < 0) {
+ b2r2_log_warn("%s: Invalid request supplied, ec=%d\n",
+ __func__, invalid_req);
+ return -EINVAL;
+ }
+
+ node = first;
+
+ while (node != NULL) {
+ n_nodes++;
+ node = node->next;
+ }
+ if (n_nodes < needed_nodes) {
+ b2r2_log_warn("%s: Not enough nodes %d < %d.\n",
+ __func__, n_nodes, needed_nodes);
+ return -EINVAL;
+ }
+
+ if (buf_count < needed_bufs) {
+ b2r2_log_warn("%s: Not enough buffers %d < %d.\n",
+ __func__, buf_count, needed_bufs);
+ return -EINVAL;
+ }
+
+#endif
+
+ reset_nodes(first);
+ node = first;
+ empty_buf = tmp_bufs;
+ out_buf = empty_buf;
+ empty_buf++;
+ /* Prepare input tile. Color_fill or read from src */
+ setup_input_stage(req, node, out_buf);
+ in_buf = out_buf;
+ out_buf = empty_buf;
+ empty_buf++;
+ node = node->next;
+
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ setup_transform_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ /* EMACSOC TODO: mask */
+ /*
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ setup_mask_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ */
+ /* Read the part of destination that will be updated */
+ setup_dst_read_stage(req, node, out_buf);
+ node = node->next;
+ setup_blend_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ setup_writeback_stage(req, node, in_buf);
+ return 0;
+}
+
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area)
+{
+ /*
+ * Nodes come in the following order: <input stage>, [transform],
+ * [src_mask], <dst_read>, <blend>, <writeback>
+ */
+ struct b2r2_node *node = first;
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_multi_buffer_src =
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ const enum b2r2_blt_fmt dst_fmt = req->user_req.dst_img.fmt;
+ const bool yuv_multi_buffer_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ u32 b2r2_rzi = 0;
+ s32 clip_top = 0;
+ s32 clip_left = 0;
+ s32 clip_bottom = req->user_req.dst_img.height - 1;
+ s32 clip_right = req->user_req.dst_img.width - 1;
+ /* Dst coords inside the dst_rect, not the buffer */
+ s32 dst_x = dst_rect_area->x;
+ s32 dst_y = dst_rect_area->y;
+
+ b2r2_log_info("%s ENTRY\n", __func__);
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Normally the inverse transform for 90 degree rotation
+ * is given by:
+ * | 0 1| |x| | y|
+ * | | X | | = | |
+ * |-1 0| |y| |-x|
+		 * but screen coordinates are flipped in the y direction
+		 * (compared to the usual Cartesian coordinates), hence the offsets.
+ */
+ src_x = (dst_rect->height - dst_y - dst_rect_area->height) *
+ h_scf;
+ src_y = dst_x * v_scf;
+ src_w = dst_rect_area->height * h_scf;
+ src_h = dst_rect_area->width * v_scf;
+ } else {
+ src_x = dst_x * h_scf;
+ src_y = dst_y * v_scf;
+ src_w = dst_rect_area->width * h_scf;
+ src_h = dst_rect_area->height * v_scf;
+ }
+
+ b2r2_rzi |= ((src_x & 0x3ff) << B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular tile.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+	 * Every destination tile maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination tile maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination tile is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ * However, the reading should not start at x == 5.0
+ * but a bit inside, namely x == 5.4
+ * The B2R2_RZI register is used to instruct the HW to do so.
+ * It contains the fractional part that will be added to
+ * the first pixel coordinate, before incrementing the current source
+ * coordinate with the step specified in B2R2_RSF register.
+ * The same applies to scaling in vertical direction.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ /*
+ * EMACSOC TODO: Remove this debug clamp, once tile size
+ * is taken into account in generic_analyze()
+ */
+ if (src_w > 128)
+ src_w = 128;
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = src_rect->width - src_x - src_w;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src/dst_rect coordinates into true
+ * src/dst_buffer coordinates
+ */
+ src_x += src_rect->x;
+ src_y += src_rect->y;
+
+ dst_x += dst_rect->x;
+ dst_y += dst_rect->y;
+
+ /*
+ * Clamp the src coords to buffer dimensions
+ * to prevent illegal reads.
+ */
+ if (src_x < 0)
+ src_x = 0;
+
+ if (src_y < 0)
+ src_y = 0;
+
+ if ((src_x + src_w) > req->user_req.src_img.width)
+ src_w = req->user_req.src_img.width - src_x;
+
+ if ((src_y + src_h) > req->user_req.src_img.height)
+ src_h = req->user_req.src_img.height - src_y;
+
+
+ /* The input node */
+ if (yuv_multi_buffer_src) {
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP10.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP10.B2R2_RZI |= b2r2_rzi;
+
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ switch (src_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((src_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((src_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |= (b2r2_rzi >> 1) &
+ ((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+			 * only in the horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |=
+ (((src_x & 0x3ff) >> 1) <<
+ B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP4.B2R2_SSZ;
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ break;
+ default:
+ break;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * dst_rect_area coordinates are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped.
+ * Horizontal and vertical flips are accomplished with
+ * suitable scanning order while writing
+ * to the temporary buffer.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ /*
+ * Scan order for source fill should always be left-to-right
+ * and top-to-bottom. Fill the input tile from top left.
+ */
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP1.B2R2_TSZ;
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s Input node done.\n", __func__);
+ }
+
+ /* Transform */
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ /*
+ * Transform node operates on temporary buffers.
+ * Content always at top left, but scanning order
+ * has to be flipped during rotation.
+ * Width and height need to be considered as well, since
+ * a tile may not necessarily be filled completely.
+ * dst_rect_area dimensions are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped on src.
+ */
+ node = node->next;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ /* Bottom line written first */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+			b2r2_log_debug("%s Transform node done.\n", __func__);
+ }
+ }
+
+ /* Source mask */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ node = node->next;
+ /*
+ * Same coords for mask as for the input stage.
+ * Should the mask be transformed together with source?
+ * EMACSOC TODO: Apply mask before any
+ * transform/scaling is done.
+		 * Otherwise it effectively becomes a dst_mask, not a src_mask.
+ */
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s Source mask node done.\n", __func__);
+ }
+ }
+
+ /* dst_read */
+ if (yuv_multi_buffer_dst) {
+ s32 dst_w = dst_rect_area->width;
+ s32 dst_h = dst_rect_area->height;
+ bool yuv420_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv422_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ node = node->next;
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (yuv420_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (yuv422_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+			 * only in the horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ node = node->next;
+ node->node.GROUP4.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s dst_read node done.\n", __func__);
+ }
+
+ /* blend */
+ node = node->next;
+ node->node.GROUP3.B2R2_SXY = 0;
+ node->node.GROUP3.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ /* contents of the foreground temporary buffer always at top left */
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s Blend node done.\n", __func__);
+ }
+
+ /* writeback */
+ node = node->next;
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0) {
+ clip_left = req->user_req.dst_clip_rect.x;
+ clip_top = req->user_req.dst_clip_rect.y;
+ clip_right = clip_left + req->user_req.dst_clip_rect.width - 1;
+ clip_bottom = clip_top + req->user_req.dst_clip_rect.height - 1;
+ }
+ /*
+	 * Clamp the dst clip rectangle to the buffer dimensions to prevent
+	 * illegal writes. An invalid clip rectangle, e.g. one lying outside
+	 * the buffer, is ignored, so that nothing is clipped.
+ */
+ if (clip_left < 0 || req->user_req.dst_img.width <= clip_left)
+ clip_left = 0;
+
+ if (clip_top < 0 || req->user_req.dst_img.height <= clip_top)
+ clip_top = 0;
+
+ if (clip_right < 0 || req->user_req.dst_img.width <= clip_right)
+ clip_right = req->user_req.dst_img.width - 1;
+
+ if (clip_bottom < 0 || req->user_req.dst_img.height <= clip_bottom)
+ clip_bottom = req->user_req.dst_img.height - 1;
+
+ /*
+ * Only allow writing inside the clip rect.
+ * INTNL bit in B2R2_CWO should be zero.
+ */
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (yuv_multi_buffer_dst) {
+ const s32 dst_w = dst_rect_area->width;
+ const s32 dst_h = dst_rect_area->height;
+ int i = 0;
+ /* Number of nodes required to write chroma output */
+ int n_nodes = 1;
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR)
+ n_nodes = 2;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s Writeback luma node done.\n",
+ __func__);
+ }
+
+ node = node->next;
+
+ /*
+		 * Chroma components: one node for a semi-planar buffer,
+		 * two nodes for a planar buffer.
+ */
+ for (i = 0; i < n_nodes && node != NULL; ++i) {
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma is half the size of luma.
+ * Must round up the chroma size to handle
+ * cases when luma size is not divisible by 2.
+ * E.g. luma_width==7 requires chroma_width==4.
+ * Chroma_width==7/2==3 is only enough
+ * for luma_width==6.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) <<
+ B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Now chroma is half the size of luma only
+				 * in the horizontal direction.
+ * Same rounding applies as
+ * for 420 formats above, except it is only
+ * done horizontally.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma has the same resolution as luma.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s Writeback chroma node "
+ "%d of %d done.\n",
+ __func__, i + 1, n_nodes);
+ }
+
+ node = node->next;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(node, false);
+ b2r2_log_debug("%s Writeback node done.\n", __func__);
+ }
+ }
+
+ b2r2_log_info("%s DONE\n", __func__);
+}
diff --git a/drivers/video/b2r2/b2r2_generic.h b/drivers/video/b2r2/b2r2_generic.h
new file mode 100644
index 00000000000..35451543c5c
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path. Full coverage of the user interface,
+ * but a non-optimized implementation. For fallback purposes.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_GENERIC_H
+#define _LINUX_VIDEO_B2R2_GENERIC_H
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+/**
+ * b2r2_generic_init()
+ */
+void b2r2_generic_init(void);
+
+/**
+ * b2r2_generic_exit()
+ */
+void b2r2_generic_exit(void);
+
+/**
+ * b2r2_generic_analyze()
+ */
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count);
+/**
+ * b2r2_generic_configure()
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count);
+/**
+ * b2r2_generic_set_areas()
+ */
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area);
+#endif
diff --git a/drivers/video/b2r2/b2r2_global.h b/drivers/video/b2r2/b2r2_global.h
new file mode 100644
index 00000000000..38cf74bb753
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_global.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 global definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_GLOBAL_H
+#define __B2R2_GLOBAL_H
+
+/** Sources involved */
+
+struct b2r2_system {
+ unsigned int B2R2_NIP;
+ unsigned int B2R2_CIC;
+ unsigned int B2R2_INS;
+ unsigned int B2R2_ACK;
+};
+
+struct b2r2_target {
+ unsigned int B2R2_TBA;
+ unsigned int B2R2_TTY;
+ unsigned int B2R2_TXY;
+ unsigned int B2R2_TSZ;
+};
+
+struct b2r2_color_fill {
+ unsigned int B2R2_S1CF;
+ unsigned int B2R2_S2CF;
+};
+
+struct b2r2_src_config {
+ unsigned int B2R2_SBA;
+ unsigned int B2R2_STY;
+ unsigned int B2R2_SXY;
+ unsigned int B2R2_SSZ;
+};
+
+struct b2r2_clip {
+ unsigned int B2R2_CWO;
+ unsigned int B2R2_CWS;
+};
+
+struct b2r2_color_key {
+ unsigned int B2R2_KEY1;
+ unsigned int B2R2_KEY2;
+};
+
+struct b2r2_clut {
+ unsigned int B2R2_CCO;
+ unsigned int B2R2_CML;
+};
+
+struct b2r2_rsz_pl_mask {
+ unsigned int B2R2_FCTL;
+ unsigned int B2R2_PMK;
+};
+
+struct b2r2_Cr_luma_rsz {
+ unsigned int B2R2_RSF;
+ unsigned int B2R2_RZI;
+ unsigned int B2R2_HFP;
+ unsigned int B2R2_VFP;
+};
+
+struct b2r2_flikr_filter {
+ unsigned int B2R2_FF0;
+ unsigned int B2R2_FF1;
+ unsigned int B2R2_FF2;
+ unsigned int B2R2_FF3;
+};
+
+struct b2r2_xyl {
+ unsigned int B2R2_XYL;
+ unsigned int B2R2_XYP;
+};
+
+struct b2r2_sau {
+ unsigned int B2R2_SAR;
+ unsigned int B2R2_USR;
+};
+
+struct b2r2_vm {
+ unsigned int B2R2_VMX0;
+ unsigned int B2R2_VMX1;
+ unsigned int B2R2_VMX2;
+ unsigned int B2R2_VMX3;
+};
+
+struct b2r2_link_list {
+
+ struct b2r2_system GROUP0;
+ struct b2r2_target GROUP1;
+ struct b2r2_color_fill GROUP2;
+ struct b2r2_src_config GROUP3;
+ struct b2r2_src_config GROUP4;
+ struct b2r2_src_config GROUP5;
+ struct b2r2_clip GROUP6;
+ struct b2r2_clut GROUP7;
+ struct b2r2_rsz_pl_mask GROUP8;
+ struct b2r2_Cr_luma_rsz GROUP9;
+ struct b2r2_Cr_luma_rsz GROUP10;
+ struct b2r2_flikr_filter GROUP11;
+ struct b2r2_color_key GROUP12;
+ struct b2r2_xyl GROUP13;
+ struct b2r2_sau GROUP14;
+ struct b2r2_vm GROUP15;
+ struct b2r2_vm GROUP16;
+
+ unsigned int B2R2_RESERVED[2];
+};
+
+
+#endif /* !defined(__B2R2_GLOBAL_H) */
diff --git a/drivers/video/b2r2/b2r2_hw.h b/drivers/video/b2r2/b2r2_hw.h
new file mode 100644
index 00000000000..d492168913a
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_hw.h
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 hw definitions
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef B2R2_HW_H__
+#define B2R2_HW_H__
+
+#include <linux/bitops.h>
+
+/* Scaling works in strips 128 pixels wide */
+#define B2R2_RESCALE_MAX_WIDTH 128
+
+/* Rotation works in strips 16 pixels wide */
+#define B2R2_ROTATE_MAX_WIDTH 16
+
+/* B2R2 color formats */
+#define B2R2_COLOR_FORMAT_SHIFT 16
+enum b2r2_native_fmt {
+ /* RGB formats */
+ B2R2_NATIVE_RGB565 = 0x00 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_RGB888 = 0x01 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8565 = 0x04 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8888 = 0x05 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB1555 = 0x06 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB4444 = 0x07 << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* YCbCr formats */
+ B2R2_NATIVE_YCBCR888 = 0x10 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR422R = 0x12 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_AYCBCR8888 = 0x15 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MB = 0x14 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_R2B = 0x16 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MBN = 0x0e << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* CLUT formats */
+ B2R2_NATIVE_CLUT2 = 0x09 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_CLUT8 = 0x0b << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT44 = 0x0c << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT88 = 0x0d << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* Misc. formats */
+ B2R2_NATIVE_A1 = 0x18 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_A8 = 0x19 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YUV = 0x1e << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_BYTE = 0x1f << B2R2_COLOR_FORMAT_SHIFT,
+};
+
+/* B2R2_CIC register values */
+enum b2r2_cic {
+ B2R2_CIC_COLOR_FILL = BIT(1),/*0x00000002*/
+ B2R2_CIC_SOURCE_1 = BIT(2),/*0x00000004*/
+ B2R2_CIC_SOURCE_2 = BIT(3),/*0x00000008*/
+ B2R2_CIC_SOURCE_3 = BIT(4),/*0x00000010*/
+ B2R2_CIC_CLIP_WINDOW = BIT(5),/*0x00000020*/
+ B2R2_CIC_CLUT = BIT(6),/*0x00000040*/
+ B2R2_CIC_FILTER_CONTROL = BIT(7),/*0x00000080*/
+ B2R2_CIC_RESIZE_CHROMA = BIT(8),/*0x00000100*/
+ B2R2_CIC_RESIZE_LUMA = BIT(9),/*0x00000200*/
+ B2R2_CIC_FLICKER_COEFF = BIT(10),/*0x00000400*/
+ B2R2_CIC_COLOR_KEY = BIT(11),/*0x00000800*/
+ B2R2_CIC_XYL = BIT(12),/*0x00001000*/
+ B2R2_CIC_SAU = BIT(13),/*0x00002000*/
+ B2R2_CIC_IVMX = BIT(14),/*0x00004000*/
+ B2R2_CIC_OVMX = BIT(15),/*0x00008000*/
+ B2R2_CIC_PACEDOT = BIT(16),/*0x00010000*/
+ B2R2_CIC_VC1 = BIT(17)/*0x00020000*/
+};
+
+/* B2R2_INS register values */
+#define B2R2_INS_SOURCE_1_SHIFT 0
+#define B2R2_INS_SOURCE_2_SHIFT 3
+#define B2R2_INS_SOURCE_3_SHIFT 5
+#define B2R2_INS_IVMX_SHIFT 6
+#define B2R2_INS_CLUTOP_SHIFT 7
+#define B2R2_INS_RESCALE2D_SHIFT 8
+#define B2R2_INS_FLICK_FILT_SHIFT 9
+#define B2R2_INS_RECT_CLIP_SHIFT 10
+#define B2R2_INS_CKEY_SHIFT 11
+#define B2R2_INS_OVMX_SHIFT 12
+#define B2R2_INS_DEI_SHIFT 13
+#define B2R2_INS_PLANE_MASK_SHIFT 14
+#define B2R2_INS_XYL_SHIFT 15
+#define B2R2_INS_DOT_SHIFT 16
+#define B2R2_INS_VC1R_SHIFT 17
+#define B2R2_INS_ROTATION_SHIFT 18
+#define B2R2_INS_PACE_DOWN_SHIFT 30
+#define B2R2_INS_BLITCOMPIRQ_SHIFT 31
+enum b2r2_ins {
+ /* Source 1 config */
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_COPY = 0x4 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_FILL = 0x7 << B2R2_INS_SOURCE_1_SHIFT,
+
+ /* Source 2 config */
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_2_SHIFT,
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_2_SHIFT,
+
+ /* Source 3 config */
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_3_SHIFT,
+
+ /* Other configs */
+ B2R2_INS_IVMX_ENABLED = 0x1 << B2R2_INS_IVMX_SHIFT,
+ B2R2_INS_CLUTOP_ENABLED = 0x1 << B2R2_INS_CLUTOP_SHIFT,
+ B2R2_INS_RESCALE2D_ENABLED = 0x1 << B2R2_INS_RESCALE2D_SHIFT,
+ B2R2_INS_FLICK_FILT_ENABLED = 0x1 << B2R2_INS_FLICK_FILT_SHIFT,
+ B2R2_INS_RECT_CLIP_ENABLED = 0x1 << B2R2_INS_RECT_CLIP_SHIFT,
+ B2R2_INS_CKEY_ENABLED = 0x1 << B2R2_INS_CKEY_SHIFT,
+ B2R2_INS_OVMX_ENABLED = 0x1 << B2R2_INS_OVMX_SHIFT,
+ B2R2_INS_DEI_ENABLED = 0x1 << B2R2_INS_DEI_SHIFT,
+ B2R2_INS_PLANE_MASK_ENABLED = 0x1 << B2R2_INS_PLANE_MASK_SHIFT,
+ B2R2_INS_XYL_ENABLED = 0x1 << B2R2_INS_XYL_SHIFT,
+ B2R2_INS_DOT_ENABLED = 0x1 << B2R2_INS_DOT_SHIFT,
+ B2R2_INS_VC1R_ENABLED = 0x1 << B2R2_INS_VC1R_SHIFT,
+ B2R2_INS_ROTATION_ENABLED = 0x1 << B2R2_INS_ROTATION_SHIFT,
+ B2R2_INS_PACE_DOWN_ENABLED = 0x1 << B2R2_INS_PACE_DOWN_SHIFT,
+ B2R2_INS_BLITCOMPIRQ_ENABLED = 0x1 << B2R2_INS_BLITCOMPIRQ_SHIFT,
+
+};
+
+/* B2R2_ACK register values */
+#define B2R2_ACK_MODE_SHIFT 0
+#define B2R2_ACK_SWAP_FG_BG_SHIFT 4
+#define B2R2_ACK_GALPHA_ROPID_SHIFT 8
+#define B2R2_ACK_CKEY_BLUE_SHIFT 16
+#define B2R2_ACK_CKEY_GREEN_SHIFT 18
+#define B2R2_ACK_CKEY_RED_SHIFT 20
+#define B2R2_ACK_CKEY_SEL_SHIFT 22
+enum b2r2_ack {
+ /* ALU operation modes */
+ B2R2_ACK_MODE_LOGICAL_OPERATION = 0x1 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT = 0x2 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_PREMULT = 0x3 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_FIRST_PASS = 0x4 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_BLEND = 0x5 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BYPASS_S2_S3 = 0x7 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_SECOND_PASS = 0x8 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_LOGICAL = 0x9 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_NOT_PREMULT =
+ 0xa << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_PREMULT = 0xb << B2R2_ACK_MODE_SHIFT,
+
+ /* ALU channel selection */
+ B2R2_ACK_SWAP_FG_BG = 0x1 << B2R2_ACK_SWAP_FG_BG_SHIFT,
+
+ /* Global alpha and ROP IDs */
+ B2R2_ACK_ROP_CLEAR = 0x0 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND = 0x1 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_REV = 0x2 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY = 0x3 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_INV = 0x4 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOOP = 0x5 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_XOR = 0x6 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR = 0x7 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOR = 0x8 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_EQUIV = 0x9 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_INVERT = 0xa << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_REV = 0xb << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY_INV = 0xc << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_INV = 0xd << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NAND = 0xe << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_SET = 0xf << B2R2_ACK_GALPHA_ROPID_SHIFT,
+
+ /* Color key configuration bits */
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_RED_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_RED_SHIFT,
+
+ /* Color key input selection */
+ B2R2_ACK_CKEY_SEL_DEST = 0x0 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_BEFORE_CLUT = 0x1 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT = 0x2 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_BLANKING_S2_ALPHA = 0x3 << B2R2_ACK_CKEY_SEL_SHIFT,
+};
+
+/* Common <S/T>TY defines */
+#define B2R2_TY_BITMAP_PITCH_SHIFT 0
+#define B2R2_TY_COLOR_FORM_SHIFT 16
+#define B2R2_TY_ALPHA_RANGE_SHIFT 21
+#define B2R2_TY_MB_ACCESS_MODE_SHIFT 23
+#define B2R2_TY_HSO_SHIFT 24
+#define B2R2_TY_VSO_SHIFT 25
+#define B2R2_TY_SUBBYTE_SHIFT 28
+#define B2R2_TY_ENDIAN_SHIFT 30
+#define B2R2_TY_SECURE_SHIFT 31
+
+/* Dummy enum for generalization of <S/T>TY registers */
+enum b2r2_ty {
+ /* Alpha range */
+ B2R2_TY_ALPHA_RANGE_128 = 0x0 << B2R2_TY_ALPHA_RANGE_SHIFT,
+ B2R2_TY_ALPHA_RANGE_255 = 0x1 << B2R2_TY_ALPHA_RANGE_SHIFT,
+
+ /* Access mode in macro-block organized frame buffers */
+ B2R2_TY_MB_ACCESS_MODE_FRAME = 0x0 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+ B2R2_TY_MB_ACCESS_MODE_FIELD = 0x1 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+
+ /* Horizontal scan order */
+ B2R2_TY_HSO_LEFT_TO_RIGHT = 0x0 << B2R2_TY_HSO_SHIFT,
+ B2R2_TY_HSO_RIGHT_TO_LEFT = 0x1 << B2R2_TY_HSO_SHIFT,
+
+ /* Vertical scan order */
+ B2R2_TY_VSO_TOP_TO_BOTTOM = 0x0 << B2R2_TY_VSO_SHIFT,
+ B2R2_TY_VSO_BOTTOM_TO_TOP = 0x1 << B2R2_TY_VSO_SHIFT,
+
+ /* Pixel ordering for sub-byte formats (position of right-most pixel) */
+ B2R2_TY_SUBBYTE_MSB = 0x0 << B2R2_TY_SUBBYTE_SHIFT,
+ B2R2_TY_SUBBYTE_LSB = 0x1 << B2R2_TY_SUBBYTE_SHIFT,
+
+	/* Bitmap endianness */
+ B2R2_TY_ENDIAN_BIG_NOT_LITTLE = 0x1 << B2R2_TY_ENDIAN_SHIFT,
+
+	/* Whether the target memory region is secure */
+ B2R2_TY_SECURE_UNSECURE = 0x0 << B2R2_TY_SECURE_SHIFT,
+ B2R2_TY_SECURE_SECURE = 0x1 << B2R2_TY_SECURE_SHIFT,
+
+ /* Dummy to make sure the data type is large enough */
+ B2R2_TY_DUMMY = 0xffffffff,
+};
+
+/* B2R2_TTY register values */
+#define B2R2_TTY_CB_NOT_CR_SHIFT 22
+#define B2R2_TTY_RGB_ROUND_SHIFT 26
+#define B2R2_TTY_CHROMA_NOT_LUMA_SHIFT 27
+enum b2r2_tty {
+
+ /* Chroma component selection */
+ B2R2_TTY_CB_NOT_CR = 0x1 << B2R2_TTY_CB_NOT_CR_SHIFT,
+
+ /* RGB rounding mode */
+ B2R2_TTY_RGB_ROUND_NORMAL = 0x0 << B2R2_TTY_RGB_ROUND_SHIFT,
+ B2R2_TTY_RGB_ROUND_DITHER = 0x1 << B2R2_TTY_RGB_ROUND_SHIFT,
+
+	/* Component selection for split frame buffer formats */
+ B2R2_TTY_CHROMA_NOT_LUMA = 0x1 << B2R2_TTY_CHROMA_NOT_LUMA_SHIFT,
+};
+
+/* B2R2_S1TY register values */
+#define B2R2_S1TY_A1_SUBST_SHIFT 22
+#define B2R2_S1TY_ROTATION_SHIFT 27
+#define B2R2_S1TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s1ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S1TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S1TY_A1_SUBST_SHIFT,
+
+ /* Input rectangle rotation (NOT YET IMPLEMENTED) */
+ B2R2_S1TY_ENABLE_ROTATION = 0x1 << B2R2_S1TY_ROTATION_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S1TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+ B2R2_S1TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S2TY register values */
+#define B2R2_S2TY_A1_SUBST_SHIFT 22
+#define B2R2_S2TY_CHROMA_LEFT_SHIFT 26
+#define B2R2_S2TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s2ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S2TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S2TY_A1_SUBST_SHIFT,
+
+ /* Chroma left extension */
+ B2R2_S2TY_CHROMA_LEFT_EXT_FOLLOWING_PIXEL = 0x0
+ << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+ B2R2_S2TY_CHROMA_LEFT_EXT_AVERAGE = 0x1 << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S2TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+ B2R2_S2TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S3TY register values */
+#define B2R2_S3TY_BLANK_ACC_SHIFT 26
+enum b2r2_s3ty {
+ /* Enables "blank" access on this source (nothing will be fetched from
+ memory) */
+ B2R2_S3TY_ENABLE_BLANK_ACCESS = 0x1 << B2R2_S3TY_BLANK_ACC_SHIFT,
+};
+
+/* B2R2_<S or T>XY register values */
+#define B2R2_XY_X_SHIFT 0
+#define B2R2_XY_Y_SHIFT 16
+
+/* B2R2_<S or T>SZ register values */
+#define B2R2_SZ_WIDTH_SHIFT 0
+#define B2R2_SZ_HEIGHT_SHIFT 16
+
+/* Clip window offset (top left coordinates) */
+#define B2R2_CWO_X_SHIFT 0
+#define B2R2_CWO_Y_SHIFT 16
+
+/* Clip window stop (bottom right coordinates) */
+#define B2R2_CWS_X_SHIFT 0
+#define B2R2_CWS_Y_SHIFT 16
+
+/* Color look-up table */
+enum b2r2_cco {
+ B2R2_CCO_CLUT_COLOR_CORRECTION = (1 << 16),
+ B2R2_CCO_CLUT_UPDATE = (1 << 18),
+ B2R2_CCO_CLUT_ON_S1 = (1 << 15)
+};
+
+/* Filter control (2D resize control) */
+enum b2r2_fctl {
+ /* Horizontal 2D filter mode */
+ B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(0),
+ B2R2_FCTL_HF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(1),
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER = BIT(2),
+
+ /* Vertical 2D filter mode */
+ B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(4),
+ B2R2_FCTL_VF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(5),
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER = BIT(6),
+
+ /* Alpha borders */
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_RIGHT = BIT(12),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_LEFT = BIT(13),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_BOTTOM = BIT(14),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_TOP = BIT(15),
+
+ /* Luma path horizontal 2D filter mode */
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER = BIT(24),
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER = BIT(25),
+
+ /* Luma path vertical 2D filter mode */
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER = BIT(28),
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER = BIT(29),
+};
+
+/* Resize scaling factor */
+#define B2R2_RSF_HSRC_INC_SHIFT 0
+#define B2R2_RSF_VSRC_INC_SHIFT 16
+
+/* Resizer initialization */
+#define B2R2_RZI_HSRC_INIT_SHIFT 0
+#define B2R2_RZI_HNB_REPEAT_SHIFT 12
+#define B2R2_RZI_VSRC_INIT_SHIFT 16
+#define B2R2_RZI_VNB_REPEAT_SHIFT 28
+
+/* Default values for the resizer */
+#define B2R2_RZI_DEFAULT_HNB_REPEAT (3 << B2R2_RZI_HNB_REPEAT_SHIFT)
+#define B2R2_RZI_DEFAULT_VNB_REPEAT (3 << B2R2_RZI_VNB_REPEAT_SHIFT)
+
+
+/* Bus plug configuration registers */
+enum b2r2_plug_opcode_size {
+ B2R2_PLUG_OPCODE_SIZE_8 = 0x3,
+ B2R2_PLUG_OPCODE_SIZE_16 = 0x4,
+ B2R2_PLUG_OPCODE_SIZE_32 = 0x5,
+ B2R2_PLUG_OPCODE_SIZE_64 = 0x6,
+};
+
+enum b2r2_plug_chunk_size {
+ B2R2_PLUG_CHUNK_SIZE_1 = 0x0,
+ B2R2_PLUG_CHUNK_SIZE_2 = 0x1,
+ B2R2_PLUG_CHUNK_SIZE_4 = 0x2,
+ B2R2_PLUG_CHUNK_SIZE_8 = 0x3,
+ B2R2_PLUG_CHUNK_SIZE_16 = 0x4,
+ B2R2_PLUG_CHUNK_SIZE_32 = 0x5,
+ B2R2_PLUG_CHUNK_SIZE_64 = 0x6,
+ B2R2_PLUG_CHUNK_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_message_size {
+ B2R2_PLUG_MESSAGE_SIZE_1 = 0x0,
+ B2R2_PLUG_MESSAGE_SIZE_2 = 0x1,
+ B2R2_PLUG_MESSAGE_SIZE_4 = 0x2,
+ B2R2_PLUG_MESSAGE_SIZE_8 = 0x3,
+ B2R2_PLUG_MESSAGE_SIZE_16 = 0x4,
+ B2R2_PLUG_MESSAGE_SIZE_32 = 0x5,
+ B2R2_PLUG_MESSAGE_SIZE_64 = 0x6,
+ B2R2_PLUG_MESSAGE_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_page_size {
+ B2R2_PLUG_PAGE_SIZE_64 = 0x0,
+ B2R2_PLUG_PAGE_SIZE_128 = 0x1,
+ B2R2_PLUG_PAGE_SIZE_256 = 0x2,
+};
+
+/* Default opcode size */
+#if defined(CONFIG_B2R2_OPSIZE_8)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_8
+#elif defined(CONFIG_B2R2_OPSIZE_16)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_16
+#elif defined(CONFIG_B2R2_OPSIZE_32)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_32
+#elif defined(CONFIG_B2R2_OPSIZE_64)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_64
+#else
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT 0
+#endif
+
+/* Default chunk size */
+#if defined(CONFIG_B2R2_CHSIZE_1)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_1
+#elif defined(CONFIG_B2R2_CHSIZE_2)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_2
+#elif defined(CONFIG_B2R2_CHSIZE_4)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_4
+#elif defined(CONFIG_B2R2_CHSIZE_8)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_8
+#elif defined(CONFIG_B2R2_CHSIZE_16)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_16
+#elif defined(CONFIG_B2R2_CHSIZE_32)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_32
+#elif defined(CONFIG_B2R2_CHSIZE_64)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_64
+#elif defined(CONFIG_B2R2_CHSIZE_128)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_128
+#else
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT 0
+#endif
+
+/* Default message size */
+#if defined(CONFIG_B2R2_MGSIZE_1)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_1
+#elif defined(CONFIG_B2R2_MGSIZE_2)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_2
+#elif defined(CONFIG_B2R2_MGSIZE_4)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_4
+#elif defined(CONFIG_B2R2_MGSIZE_8)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_8
+#elif defined(CONFIG_B2R2_MGSIZE_16)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_16
+#elif defined(CONFIG_B2R2_MGSIZE_32)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_32
+#elif defined(CONFIG_B2R2_MGSIZE_64)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_64
+#elif defined(CONFIG_B2R2_MGSIZE_128)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_128
+#else
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT 0
+#endif
+
+/* Default page size */
+#if defined(CONFIG_B2R2_PGSIZE_64)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_64
+#elif defined(CONFIG_B2R2_PGSIZE_128)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_128
+#elif defined(CONFIG_B2R2_PGSIZE_256)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_256
+#else
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT 0
+#endif
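The four *_DEFAULT macros above are driven by Kconfig choices rather than hard-coded values. As an illustrative sketch only (the driver's Kconfig file is not part of this hunk), a defconfig fragment selecting the largest supported settings might read:

    CONFIG_B2R2_OPSIZE_64=y
    CONFIG_B2R2_CHSIZE_128=y
    CONFIG_B2R2_MGSIZE_128=y
    CONFIG_B2R2_PGSIZE_256=y

If none of the options in a group is set, the corresponding default falls back to 0.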
+
+/* VMX register values for RGB to YUV color conversion */
+/* Magic numbers from 27.11 in DB8500_DesignSpecification_v2.5.pdf */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_601_VIDEO 0x107e4beb
+#define B2R2_VMX1_RGB_TO_YUV_601_VIDEO 0x0982581d
+#define B2R2_VMX2_RGB_TO_YUV_601_VIDEO 0xfa9ea483
+#define B2R2_VMX3_RGB_TO_YUV_601_VIDEO 0x08000080
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_601_GFX 0x0e1e8bee
+#define B2R2_VMX1_RGB_TO_YUV_601_GFX 0x08420419
+#define B2R2_VMX2_RGB_TO_YUV_601_GFX 0xfb5ed471
+#define B2R2_VMX3_RGB_TO_YUV_601_GFX 0x08004080
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_709_VIDEO 0x107e27f4
+#define B2R2_VMX1_RGB_TO_YUV_709_VIDEO 0x06e2dc13
+#define B2R2_VMX2_RGB_TO_YUV_709_VIDEO 0xfc5e6c83
+#define B2R2_VMX3_RGB_TO_YUV_709_VIDEO 0x08000080
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_709_GFX 0x0e3e6bf5
+#define B2R2_VMX1_RGB_TO_YUV_709_GFX 0x05e27410
+#define B2R2_VMX2_RGB_TO_YUV_709_GFX 0xfcdea471
+#define B2R2_VMX3_RGB_TO_YUV_709_GFX 0x08004080
+
+/* VMX register values for YUV to RGB color conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_601_VIDEO 0x2c440000
+#define B2R2_VMX1_YUV_TO_RGB_601_VIDEO 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_RGB_601_VIDEO 0x0004013f
+#define B2R2_VMX3_YUV_TO_RGB_601_VIDEO 0x34f21322
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_601_GFX 0x3324a800
+#define B2R2_VMX1_YUV_TO_RGB_601_GFX 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_RGB_601_GFX 0x0004a957
+#define B2R2_VMX3_YUV_TO_RGB_601_GFX 0x32121eeb
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_709_VIDEO 0x31440000
+#define B2R2_VMX1_YUV_TO_RGB_709_VIDEO 0xf16403d1
+#define B2R2_VMX2_YUV_TO_RGB_709_VIDEO 0x00040145
+#define B2R2_VMX3_YUV_TO_RGB_709_VIDEO 0x33b14b18
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_709_GFX 0x3964a800
+#define B2R2_VMX1_YUV_TO_RGB_709_GFX 0xef04abc9
+#define B2R2_VMX2_YUV_TO_RGB_709_GFX 0x0004a95f
+#define B2R2_VMX3_YUV_TO_RGB_709_GFX 0x307132df
+
+/* VMX register values for RGB to BGR conversion */
+#define B2R2_VMX0_RGB_TO_BGR 0x00000100
+#define B2R2_VMX1_RGB_TO_BGR 0x00040000
+#define B2R2_VMX2_RGB_TO_BGR 0x20000000
+#define B2R2_VMX3_RGB_TO_BGR 0x00000000
+
+/* VMX register values for BGR to YUV color conversion */
+/* Note: All BGR -> YUV values are calculated by multiplying
+ * the RGB -> YUV matrices [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |0 1 0|
+ * |1 0 0|
+ * Essentially swapping first and third columns in
+ * the matrices (VMX0, VMX1 and VMX2 values).
+ * The offset vector VMX3 remains untouched.
+ * Put another way, the value of bits 0 through 9
+ * is swapped with the value of
+ * bits 20 through 31 in VMX0, VMX1 and VMX2,
+ * taking into consideration the compression
+ * that is used on bits 0 through 9. Bit 0 being LSB.
+ */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_601_VIDEO 0xfd7e4883
+#define B2R2_VMX1_BGR_TO_YUV_601_VIDEO 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YUV_601_VIDEO 0x107ea7d4
+#define B2R2_VMX3_BGR_TO_YUV_601_VIDEO 0x08000080
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_601_GFX 0xfdde8870
+#define B2R2_VMX1_BGR_TO_YUV_601_GFX 0x03220442
+#define B2R2_VMX2_BGR_TO_YUV_601_GFX 0x0e3ed7da
+#define B2R2_VMX3_BGR_TO_YUV_601_GFX 0x08004080
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_709_VIDEO 0xfe9e2483
+#define B2R2_VMX1_BGR_TO_YUV_709_VIDEO 0x0262dc37
+#define B2R2_VMX2_BGR_TO_YUV_709_VIDEO 0x107e6fe2
+#define B2R2_VMX3_BGR_TO_YUV_709_VIDEO 0x08000080
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_709_GFX 0xfebe6871
+#define B2R2_VMX1_BGR_TO_YUV_709_GFX 0x0202742f
+#define B2R2_VMX2_BGR_TO_YUV_709_GFX 0x0e3ea7e6
+#define B2R2_VMX3_BGR_TO_YUV_709_GFX 0x08004080
+
+
+/* VMX register values for YUV to BGR conversion */
+/* Note: All YUV -> BGR values are constructed
+ * from the YUV -> RGB ones, by swapping
+ * first and third rows in the matrix
+ * (VMX0 and VMX2 values). Further, the first and
+ * third values in the offset vector need to be
+ * swapped as well, i.e. bits 0 through 9 are swapped
+ * with bits 20 through 29 in the VMX3 value.
+ * Bit 0 being LSB.
+ */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_601_VIDEO 0x0004013f
+#define B2R2_VMX1_YUV_TO_BGR_601_VIDEO 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_BGR_601_VIDEO 0x2c440000
+#define B2R2_VMX3_YUV_TO_BGR_601_VIDEO 0x3222134f
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_601_GFX 0x0004a957
+#define B2R2_VMX1_YUV_TO_BGR_601_GFX 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_BGR_601_GFX 0x3324a800
+#define B2R2_VMX3_YUV_TO_BGR_601_GFX 0x2eb21f21
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_709_VIDEO 0x00040145
+#define B2R2_VMX1_YUV_TO_BGR_709_VIDEO 0xf16403d1
+#define B2R2_VMX2_YUV_TO_BGR_709_VIDEO 0x31440000
+#define B2R2_VMX3_YUV_TO_BGR_709_VIDEO 0x31814b3b
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_709_GFX 0x0004a95f
+#define B2R2_VMX1_YUV_TO_BGR_709_GFX 0xef04abc9
+#define B2R2_VMX2_YUV_TO_BGR_709_GFX 0x3964a800
+#define B2R2_VMX3_YUV_TO_BGR_709_GFX 0x2df13307
+
+
+/* VMX register values for YVU to RGB conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YVU_TO_RGB_601_VIDEO 0x00040120
+#define B2R2_VMX1_YVU_TO_RGB_601_VIDEO 0xF544034D
+#define B2R2_VMX2_YVU_TO_RGB_601_VIDEO 0x37840000
+#define B2R2_VMX3_YVU_TO_RGB_601_VIDEO 0x34f21322
+
+/* VMX register values for RGB to YVU conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_RGB_TO_YVU_601_VIDEO 0xfa9ea483
+#define B2R2_VMX1_RGB_TO_YVU_601_VIDEO 0x0982581d
+#define B2R2_VMX2_RGB_TO_YVU_601_VIDEO 0x107e4beb
+#define B2R2_VMX3_RGB_TO_YVU_601_VIDEO 0x08000080
+
+/* VMX register values for YVU to BGR conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YVU_TO_BGR_601_VIDEO 0x37840000
+#define B2R2_VMX1_YVU_TO_BGR_601_VIDEO 0xF544034D
+#define B2R2_VMX2_YVU_TO_BGR_601_VIDEO 0x00040120
+#define B2R2_VMX3_YVU_TO_BGR_601_VIDEO 0x3222134F
+
+/* VMX register values for BGR to YVU conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_BGR_TO_YVU_601_VIDEO 0x107ea7d4
+#define B2R2_VMX1_BGR_TO_YVU_601_VIDEO 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YVU_601_VIDEO 0xfd7e4883
+#define B2R2_VMX3_BGR_TO_YVU_601_VIDEO 0x08000080
+
+/* VMX register values for YVU to YUV conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/* Internally, the components are in fact stored
+ * with luma in the middle, i.e. UYV, which is why
+ * the values are just like for RGB->BGR conversion.
+ */
+#define B2R2_VMX0_YVU_TO_YUV_601_VIDEO 0x00000100
+#define B2R2_VMX1_YVU_TO_YUV_601_VIDEO 0x00040000
+#define B2R2_VMX2_YVU_TO_YUV_601_VIDEO 0x20000000
+#define B2R2_VMX3_YVU_TO_YUV_601_VIDEO 0x00000000
+
+/* VMX register values for RGB to BLT_YUV888 conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/*
+ * BLT_YUV888 has color components laid out in memory as V, U, Y, (Alpha)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ * Note: RGB -> BLT_YUV888 values are calculated by multiplying
+ * the RGB -> YUV matrix [A], with [S] to form [S]x[A] where
+ * |0 1 0|
+ * S = |0 0 1|
+ * |1 0 0|
+ * Essentially changing the order of rows in the original
+ * matrix [A].
+ * row1 -> row3
+ * row2 -> row1
+ * row3 -> row2
+ * Values in the offset vector are swapped in the same manner.
+ */
+#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO 0x0982581d
+#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO 0xfa9ea483
+#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO 0x107e4beb
+#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO 0x00020080
+
+/* VMX register values for BLT_YUV888 to RGB conversion */
+
+/*
+ * Note: BLT_YUV888 -> RGB values are calculated by multiplying
+ * the YUV -> RGB matrix [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |1 0 0|
+ * |0 1 0|
+ * Essentially changing the order of columns in the original
+ * matrix [A].
+ * col1 -> col3
+ * col2 -> col1
+ * col3 -> col2
+ * Values in the offset vector remain unchanged.
+ */
+#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO 0x20000121
+#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO 0x201ea74c
+#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO 0x2006f000
+#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO 0x34f21322
+
+/* VMX register values for YUV to BLT_YUV888 conversion */
+#define B2R2_VMX0_YUV_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YUV_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX2_YUV_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX3_YUV_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YUV conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YUV 0x00000100
+#define B2R2_VMX1_BLT_YUV888_TO_YUV 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YUV 0x00040000
+#define B2R2_VMX3_BLT_YUV888_TO_YUV 0x00000000
+
+/* VMX register values for YVU to BLT_YUV888 conversion */
+#define B2R2_VMX0_YVU_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YVU_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX2_YVU_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX3_YVU_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YVU conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YVU 0x00040000
+#define B2R2_VMX1_BLT_YUV888_TO_YVU 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YVU 0x00000100
+#define B2R2_VMX3_BLT_YUV888_TO_YVU 0x00000000
+
+#endif /* B2R2_HW_H__ */
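To illustrate how the shift macros and enum values defined in this header are meant to be combined into register words, here is a minimal editorial sketch (not part of the patch; it assumes b2r2_hw.h is included and that the X/Y fields occupy 16 bits each):

    #include <linux/types.h>
    #include "b2r2_hw.h"

    /* Pack a target X/Y coordinate pair into a B2R2_TXY-style word. */
    static u32 b2r2_example_pack_xy(u16 x, u16 y)
    {
    	return ((u32)x << B2R2_XY_X_SHIFT) | ((u32)y << B2R2_XY_Y_SHIFT);
    }

    /* Enable the 2D resizer on both the horizontal and vertical paths. */
    static u32 b2r2_example_fctl_scale(void)
    {
    	return B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
    		B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
    }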
diff --git a/drivers/video/b2r2/b2r2_input_validation.c b/drivers/video/b2r2/b2r2_input_validation.c
new file mode 100644
index 00000000000..0de714f1161
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include "b2r2_input_validation.h"
+
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+
+#include <video/b2r2_blt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt);
+
+static bool is_valid_pitch_for_fmt(u32 pitch, s32 width,
+ enum b2r2_blt_fmt fmt);
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt);
+
+static bool validate_img(struct b2r2_blt_img *img);
+static bool validate_rect(struct b2r2_blt_rect *rect);
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+
+static bool is_valid_pitch_for_fmt(u32 pitch, s32 width, enum b2r2_blt_fmt fmt)
+{
+ s32 complete_width;
+ u32 pitch_derived_from_width;
+
+ complete_width = width_2_complete_width(width, fmt);
+
+ pitch_derived_from_width =
+ b2r2_calc_pitch_from_width(complete_width, fmt);
+
+ if (pitch < pitch_derived_from_width)
+ return false;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ if (!b2r2_is_aligned(pitch, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (!b2r2_is_aligned(pitch, 4))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ if (!b2r2_is_aligned(width, 4))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ if (!b2r2_is_aligned(width, 8))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return b2r2_align_up(width, 2);
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return b2r2_align_up(width, 16);
+
+ default:
+ return width;
+ }
+}
+
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(width, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(height, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(height, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool validate_img(struct b2r2_blt_img *img)
+{
+ /*
+ * So that we can always do width * height * bpp without overflowing a
+ * 32-bit signed integer. isqrt(s32_max / max_bpp) was used to
+ * calculate the value.
+ */
+ static const s32 max_img_width_height = 8191;
+
+ s32 img_size;
+
+ if (!is_valid_format(img->fmt)) {
+ b2r2_log_info("Validation Error: !is_valid_format(img->fmt)\n");
+ return false;
+ }
+
+ if (img->width < 0 || img->width > max_img_width_height ||
+ img->height < 0 || img->height > max_img_width_height) {
+ b2r2_log_info("Validation Error: "
+ "img->width < 0 || "
+ "img->width > max_img_width_height || "
+ "img->height < 0 || "
+ "img->height > max_img_width_height\n");
+ return false;
+ }
+
+ if (b2r2_is_mb_fmt(img->fmt)) {
+ if (!is_complete_width_for_fmt(img->width, img->fmt)) {
+ b2r2_log_info("Validation Error: "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt)\n");
+ return false;
+ }
+ } else {
+ if (0 == img->pitch &&
+ (!is_aligned_width_for_fmt(img->width, img->fmt) ||
+ !is_complete_width_for_fmt(img->width, img->fmt))) {
+ b2r2_log_info("Validation Error: "
+ "0 == img->pitch && "
+ "(!is_aligned_width_for_fmt(img->width,"
+ " img->fmt) || "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt))\n");
+ return false;
+ }
+
+ if (img->pitch != 0 &&
+ !is_valid_pitch_for_fmt(img->pitch, img->width,
+ img->fmt)) {
+ b2r2_log_info("Validation Error: "
+ "img->pitch != 0 && "
+ "!is_valid_pitch_for_fmt(img->pitch, "
+ "img->width, img->fmt)\n");
+ return false;
+ }
+ }
+
+ if (!is_valid_height_for_fmt(img->height, img->fmt)) {
+ b2r2_log_info("Validation Error: "
+ "!is_valid_height_for_fmt(img->height, "
+ "img->fmt)\n");
+ return false;
+ }
+
+ img_size = b2r2_get_img_size(img);
+
+ /*
+ * To keep the entire image inside s32 range.
+ */
+ if ((B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type ||
+ B2R2_BLT_PTR_FD_OFFSET == img->buf.type) &&
+ img->buf.offset > (u32)b2r2_s32_max - (u32)img_size) {
+ b2r2_log_info("Validation Error: "
+ "(B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == "
+ "img->buf.type || B2R2_BLT_PTR_FD_OFFSET == "
+ "img->buf.type) && img->buf.offset > "
+ "(u32)B2R2_MAX_S32 - (u32)img_size\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_rect(struct b2r2_blt_rect *rect)
+{
+ if (rect->width < 0 || rect->height < 0) {
+ b2r2_log_info("Validation Error: "
+ "rect->width < 0 || rect->height < 0\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool b2r2_validate_user_req(struct b2r2_blt_req *req)
+{
+ bool is_src_img_used;
+ bool is_src_mask_used;
+ bool is_dst_clip_rect_used;
+
+ if (req->size != sizeof(struct b2r2_blt_req)) {
+ b2r2_log_err("Validation Error: "
+ "req->size != sizeof(struct b2r2_blt_req)\n");
+ return false;
+ }
+
+ is_src_img_used = !(req->flags & B2R2_BLT_FLAG_SOURCE_FILL ||
+ req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW);
+ is_src_mask_used = req->flags & B2R2_BLT_FLAG_SOURCE_MASK;
+ is_dst_clip_rect_used = req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP;
+
+ if (is_src_img_used || is_src_mask_used) {
+ if (!validate_rect(&req->src_rect)) {
+ b2r2_log_info("Validation Error: "
+ "!validate_rect(&req->src_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_rect(&req->dst_rect)) {
+ b2r2_log_info("Validation Error: "
+ "!validate_rect(&req->dst_rect)\n");
+ return false;
+ }
+
+ if (is_dst_clip_rect_used) {
+ if (!validate_rect(&req->dst_clip_rect)) {
+ b2r2_log_info("Validation Error: "
+ "!validate_rect(&req->dst_clip_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_img_used) {
+ struct b2r2_blt_rect src_img_bounding_rect;
+
+ if (!validate_img(&req->src_img)) {
+ b2r2_log_info("Validation Error: "
+ "!validate_img(&req->src_img)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_img,
+ &src_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_img_bounding_rect)) {
+ b2r2_log_info("Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_mask_used) {
+ struct b2r2_blt_rect src_mask_bounding_rect;
+
+ if (!validate_img(&req->src_mask)) {
+ b2r2_log_info("Validation Error: "
+ "!validate_img(&req->src_mask)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_mask,
+ &src_mask_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_mask_bounding_rect)) {
+ b2r2_log_info("Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_mask_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_img(&req->dst_img)) {
+ b2r2_log_info("Validation Error: "
+ "!validate_img(&req->dst_img)\n");
+ return false;
+ }
+
+ return true;
+}
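As an editorial sketch of how this validator is intended to be used (the caller in the real driver is not part of this hunk, so the function below is hypothetical):

    #include <linux/errno.h>
    #include <video/b2r2_blt.h>
    #include "b2r2_input_validation.h"

    /* Hypothetical caller: reject a userspace request up front, before any
     * buffers are resolved or B2R2 nodes are generated. */
    static int example_handle_blt_request(struct b2r2_blt_req *req)
    {
    	if (!b2r2_validate_user_req(req))
    		return -EINVAL;

    	/* ... resolve buffers, generate nodes, queue the job ... */
    	return 0;
    }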
diff --git a/drivers/video/b2r2/b2r2_input_validation.h b/drivers/video/b2r2/b2r2_input_validation.h
new file mode 100644
index 00000000000..9a736343e06
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+
+#include <video/b2r2_blt.h>
+
+bool b2r2_validate_user_req(struct b2r2_blt_req *req);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h
new file mode 100644
index 00000000000..18556e7193a
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_internal.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_core.h"
+#include "b2r2_global.h"
+
+#include "b2r2_hw.h"
+
+/* The maximum possible number of temporary buffers needed */
+#define MAX_TMP_BUFS_NEEDED 2
+
+/* Size of the color look-up table */
+#define CLUT_SIZE 1024
+
+/**
+ * b2r2_blt_device() - Returns the device associated with B2R2 BLT.
+ * Mainly for debugging with dev_... functions.
+ *
+ * Returns the device pointer or NULL
+ */
+struct device *b2r2_blt_device(void);
+
+/**
+ * struct b2r2_blt_instance - Represents the B2R2 instance (one per open)
+ *
+ * @lock: Lock to protect the instance
+ *
+ * @report_list: Ready requests that should be reported,
+ * @report_list_waitq: Wait queue for report list
+ * @no_of_active_requests: Number of requests added but not reported
+ * in callback.
+ * @synching: true if any client is waiting for b2r2_blt_synch(0)
+ * @synch_done_waitq: Wait queue to handle synching on request_id 0
+ */
+struct b2r2_blt_instance {
+ struct mutex lock;
+
+ /* Requests to be reported */
+ struct list_head report_list;
+ wait_queue_head_t report_list_waitq;
+
+ /* Below for synching */
+ u32 no_of_active_requests;
+ bool synching;
+ wait_queue_head_t synch_done_waitq;
+};
+
+/**
+ * struct b2r2_node - Represents a B2R2 node with register values, executed
+ * by B2R2. Should be allocated non-cached.
+ *
+ * @next: Next node
+ * @physical_address: Physical address to be given to B2R2
+ * (physical address of "node" member below)
+ * @node: The B2R2 node with register settings. This is the data
+ * that B2R2 will use.
+ *
+ */
+struct b2r2_node {
+ struct b2r2_node *next;
+ u32 physical_address;
+
+ int src_tmp_index;
+ int dst_tmp_index;
+
+ int src_index;
+
+ /* B2R2 regs comes here */
+ struct b2r2_link_list node;
+};
+
+/**
+ * struct b2r2_resolved_buf - Contains calculated information about
+ * image buffers.
+ *
+ * @physical_address: Physical address of the buffer
+ * @virtual_address: Virtual address of the buffer
+ * @is_pmem: true if buffer is from pmem
+ * @hwmem_alloc: Hwmem allocation handle
+ * @filep: File pointer of mapped file (like pmem device, frame buffer device)
+ * @file_physical_start: Physical address of file start
+ * @file_virtual_start: Virtual address of file start
+ * @file_len: File length
+ *
+ */
+struct b2r2_resolved_buf {
+ u32 physical_address;
+ void *virtual_address;
+ bool is_pmem;
+ struct hwmem_alloc *hwmem_alloc;
+ /* Data for validation below */
+ struct file *filep;
+ u32 file_physical_start;
+ u32 file_virtual_start;
+ u32 file_len;
+};
+
+
+/**
+ * b2r2_work_buf - specification for a temporary work buffer
+ *
+ * @size - the size of the buffer (set by b2r2_node_split)
+ * @phys_addr - the physical address of the buffer (set by b2r2_blt_main)
+ * @virt_addr - the virtual address of the buffer
+ * @mem_handle - the memory allocation handle for the buffer
+ */
+struct b2r2_work_buf {
+ u32 size;
+ u32 phys_addr;
+ void *virt_addr;
+ u32 mem_handle;
+};
+
+
+/**
+ * b2r2_op_type - the type of B2R2 operation to configure
+ */
+enum b2r2_op_type {
+ B2R2_DIRECT_COPY,
+ B2R2_DIRECT_FILL,
+ B2R2_COPY,
+ B2R2_FILL,
+ B2R2_SCALE,
+ B2R2_ROTATE,
+ B2R2_SCALE_AND_ROTATE,
+ B2R2_FLIP,
+};
+
+/**
+ * b2r2_fmt_type - the type of buffer for a given format
+ */
+enum b2r2_fmt_type {
+ B2R2_FMT_TYPE_RASTER,
+ B2R2_FMT_TYPE_SEMI_PLANAR,
+ B2R2_FMT_TYPE_PLANAR,
+};
+
+/**
+ * b2r2_fmt_conv - the type of format conversion to do
+ */
+enum b2r2_fmt_conv {
+ B2R2_FMT_CONV_NONE,
+ B2R2_FMT_CONV_RGB_TO_YUV,
+ B2R2_FMT_CONV_YUV_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_YUV,
+ B2R2_FMT_CONV_RGB_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_YUV,
+};
+
+/**
+ * b2r2_node_split_buf - information about a source or destination buffer
+ *
+ * @addr - the physical base address
+ * @chroma_addr - the physical address of the chroma plane
+ * @chroma_cr_addr - the physical address of the Cr chroma plane
+ * @fmt - the buffer format
+ * @fmt_type - the buffer format type
+ * @rect - the rectangle of the buffer to use
+ * @color - the color value to use in case of a fill operation
+ * @pitch - the pixmap byte pitch
+ * @height - the pixmap height
+ * @alpha_range - the alpha range of the buffer (0-128 or 0-255)
+ * @hso - the horizontal scan order
+ * @vso - the vertical scan order
+ * @endian - the endianness of the buffer
+ * @plane_selection - the plane to write if buffer is planar or semi-planar
+ */
+struct b2r2_node_split_buf {
+ u32 addr;
+ u32 chroma_addr;
+ u32 chroma_cr_addr;
+
+ enum b2r2_blt_fmt fmt;
+ enum b2r2_fmt_type type;
+
+ struct b2r2_blt_rect rect;
+ struct b2r2_blt_rect win;
+
+ s32 dx;
+ s32 dy;
+
+ u32 color;
+ u16 pitch;
+ u16 width;
+ u16 height;
+
+ enum b2r2_ty alpha_range;
+ enum b2r2_ty hso;
+ enum b2r2_ty vso;
+ enum b2r2_ty endian;
+ enum b2r2_tty dither;
+
+ /* Plane selection (used when writing to a multibuffer format) */
+ enum b2r2_tty plane_selection;
+
+ /* Chroma plane selection (used when writing planar formats) */
+ enum b2r2_tty chroma_selection;
+
+ int tmp_buf_index;
+};
+
+/**
+ * b2r2_node_split_job - an instance of a node split job
+ *
+ * @type - the type of operation
+ * @ivmx - the ivmx matrix to use for color conversion
+ * @blend - determines if blending is enabled
+ * @clip - determines if destination clipping is enabled
+ * @swap_fg_bg - determines if FG and BG should be swapped when blending
+ * @flags - the flags passed in the blt request
+ * @flag_param - parameter required by certain flags,
+ * e.g. color for source color keying.
+ * @transform - the transforms passed in the blt request
+ * @global_alpha - the global alpha
+ * @clip_rect - the clipping rectangle to use
+ * @h_rescale - determines if horizontal rescaling is enabled
+ * @h_rsf - the horizontal scale factor
+ * @v_rescale - determines if vertical rescaling is enabled
+ * @v_rsf - the vertical scale factor
+ * @src - the incoming source buffer
+ * @dst - the outgoing destination buffer
+ * @work_bufs - work buffer specifications
+ * @tmp_bufs - temporary buffers
+ * @buf_count - the number of temporary buffers used for the job
+ * @node_count - the number of nodes used for the job
+ * @max_buf_size - the maximum size of temporary buffers
+ * @nbr_rows - the number of tile rows in the blit operation
+ * @nbr_cols - the number of tile columns in the blit operation
+ */
+struct b2r2_node_split_job {
+ enum b2r2_op_type type;
+
+ const u32 *ivmx;
+
+ bool blend;
+ bool clip;
+ bool rotation;
+
+ bool swap_fg_bg;
+
+ u32 flags;
+ u32 flag_param;
+ u32 transform;
+ u32 global_alpha;
+
+ struct b2r2_blt_rect clip_rect;
+
+ bool h_rescale;
+ u16 h_rsf;
+
+ bool v_rescale;
+ u16 v_rsf;
+
+ struct b2r2_node_split_buf src;
+ struct b2r2_node_split_buf dst;
+
+ struct b2r2_work_buf work_bufs[MAX_TMP_BUFS_NEEDED];
+ struct b2r2_node_split_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+
+ u32 buf_count;
+ u32 node_count;
+ u32 max_buf_size;
+};
+
+/**
+ * struct b2r2_blt_request - Represents one B2R2 blit request
+ *
+ * @instance: Back pointer to the instance structure
+ * @list: List item to keep track of requests per instance
+ * @user_req: The request received from userspace
+ * @job: The administration structure for the B2R2 job,
+ * consisting of one or more nodes
+ * @node_split_job: The administration structure for the B2R2 node split job
+ * @first_node: Pointer to the first B2R2 node
+ * @request_id: Request id for this job
+ * @node_split_handle: Handle of the node split
+ * @src_resolved: Calculated info about the source buffer
+ * @src_mask_resolved: Calculated info about the source mask buffer
+ * @dst_resolved: Calculated info about the destination buffer
+ * @profile: True if the blit shall be profiled, false otherwise
+ */
+struct b2r2_blt_request {
+ struct b2r2_blt_instance *instance;
+ struct list_head list;
+ struct b2r2_blt_req user_req;
+ struct b2r2_core_job job;
+ struct b2r2_node_split_job node_split_job;
+ struct b2r2_node *first_node;
+ int request_id;
+
+ /* Resolved buffer addresses */
+ struct b2r2_resolved_buf src_resolved;
+ struct b2r2_resolved_buf src_mask_resolved;
+ struct b2r2_resolved_buf dst_resolved;
+
+ /* TBD: Info about SRAM usage & needs */
+ struct b2r2_work_buf *bufs;
+ u32 buf_count;
+
+ /* color look-up table */
+ void *clut;
+ u32 clut_phys_addr;
+
+ /* Profiling stuff */
+ bool profile;
+
+ s32 nsec_active_in_cpu;
+
+ u32 start_time_nsec;
+ s32 total_time_nsec;
+};
+
+/* FIXME: The functions below should be removed when we switch
+ to the new Robert Lind allocator */
+
+/**
+ * b2r2_blt_alloc_nodes() - Allocate nodes
+ *
+ * @node_count: Number of nodes to allocate
+ *
+ * Return:
+ * Returns a pointer to the first node in the node list.
+ */
+struct b2r2_node *b2r2_blt_alloc_nodes(int node_count);
+
+/**
+ * b2r2_blt_free_nodes() - Release nodes previously allocated via
+ * b2r2_generate_nodes
+ *
+ * @first_node: First node in linked list of nodes
+ */
+void b2r2_blt_free_nodes(struct b2r2_node *first_node);
+
+/**
+ * b2r2_blt_module_init() - Initialize the B2R2 blt module
+ */
+int b2r2_blt_module_init(void);
+
+/**
+ * b2r2_blt_module_exit() - Un-initialize the B2R2 blt module
+ */
+void b2r2_blt_module_exit(void);
+
+#endif
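A short editorial sketch of how the node helpers declared above fit together (assumptions: b2r2_internal.h is included and the allocator has been initialized; the function name is illustrative):

    #include <linux/kernel.h>
    #include "b2r2_internal.h"

    static void example_walk_node_list(void)
    {
    	struct b2r2_node *first = b2r2_blt_alloc_nodes(3);
    	struct b2r2_node *node;

    	if (first == NULL)
    		return;

    	/* physical_address points at the embedded register block (the
    	 * "node" member), which is what the B2R2 hardware consumes. */
    	for (node = first; node != NULL; node = node->next)
    		pr_info("B2R2 node registers at 0x%08x\n",
    			node->physical_address);

    	b2r2_blt_free_nodes(first);
    }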
diff --git a/drivers/video/b2r2/b2r2_kernel_if.c b/drivers/video/b2r2/b2r2_kernel_if.c
new file mode 100644
index 00000000000..373311ccca5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_kernel_if.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 kernel interface for being a separate module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(fput_light);
+EXPORT_SYMBOL(flush_cache_range);
+EXPORT_SYMBOL(task_sched_runtime);
+#ifdef CONFIG_ANDROID_PMEM
+EXPORT_SYMBOL(get_pmem_file);
+EXPORT_SYMBOL(put_pmem_file);
+EXPORT_SYMBOL(flush_pmem_file);
+#endif
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.c b/drivers/video/b2r2/b2r2_mem_alloc.c
new file mode 100644
index 00000000000..0dcd9b6a55e
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_mem_alloc.h"
+
+
+/* Represents one block of b2r2 physical memory, free or allocated */
+struct b2r2_mem_block {
+ struct list_head list; /* For membership in list */
+
+ u32 offset; /* Offset in b2r2 physical memory area (aligned) */
+ u32 size; /* Size of the object (requested size if busy,
+ else actual) */
+ bool free; /* True if the block is free */
+
+ u32 lock_count; /* Lock count */
+
+#ifdef CONFIG_DEBUG_FS
+ char debugfs_fname[80]; /* debugfs file name */
+ struct dentry *debugfs_block; /* debugfs dir entry for the block */
+#endif
+};
+
+/* The memory heap */
+struct b2r2_mem_heap {
+ struct device *dev; /* Device pointer for memory allocation */
+ dma_addr_t start_phys_addr;/* Physical memory start address */
+ void *start_virt_ptr; /* Virtual pointer to start */
+ u32 size; /* Memory size */
+ u32 align; /* Alignment */
+
+ struct list_head blocks; /* List of all blocks */
+
+ spinlock_t heap_lock; /* Protection for the heap */
+
+ u32 node_size; /* Size of each B2R2 node */
+ struct dma_pool *node_heap;/* Heap for B2R2 node allocations */
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_sub_root_dir; /* debugfs: B2R2 MEM root dir */
+ struct dentry *debugfs_heap_stats; /* debugfs: B2R2 memory status */
+ struct dentry *debugfs_dir_blocks; /* debugfs: Free blocks dir */
+#endif
+};
+
+static struct b2r2_mem_heap *mem_heap;
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_root_dir; /* debugfs: B2R2 MEM root dir */
+#endif
+
+/* Forward declarations */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ u32 offset, u32 size, bool free);
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block);
+
+/* Align value down to specified alignment */
+static inline u32 align_down(u32 align, u32 value)
+{
+ return value & ~(align - 1);
+}
+
+/* Align value up to specified alignment */
+static inline u32 align_up(u32 align, u32 value)
+{
+ return (value + align - 1) & ~(align - 1);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/* About debugfs:
+ * debugfs is a mountable debug file system.
+ *
+ * Mount like this:
+ * mkdir /debug
+ * mount -t debugfs none /debug
+ * ls /debug/b2r2_mem
+ *
+ * ls -al /debug/b2r2_mem/blocks
+ * cat /debug/b2r2_mem/stats
+ */
+
+
+/* Create string containing memory heap status */
+static char *get_b2r2_mem_stats(struct b2r2_mem_heap *mem_heap, char *buf)
+{
+ struct b2r2_mem_heap_status mem_heap_status;
+
+ if (b2r2_mem_heap_status(&mem_heap_status) != 0) {
+ strcpy(buf, "Error, failed to get status\n");
+ return buf;
+ }
+
+ sprintf(buf,
+ "Handle : 0x%lX\n"
+ "Physical start address : 0x%lX\n"
+ "Size : %lu\n"
+ "Align : %lu\n"
+ "No of blocks allocated : %lu\n"
+ "Allocated size : %lu\n"
+ "No of free blocks : %lu\n"
+ "Free size : %lu\n"
+ "No of locks : %lu\n"
+ "No of locked : %lu\n"
+ "No of nodes : %lu\n",
+ (unsigned long) mem_heap,
+ (unsigned long) mem_heap_status.start_phys_addr,
+ (unsigned long) mem_heap_status.size,
+ (unsigned long) mem_heap_status.align,
+ (unsigned long) mem_heap_status.num_alloc,
+ (unsigned long) mem_heap_status.allocated_size,
+ (unsigned long) mem_heap_status.num_free,
+ (unsigned long) mem_heap_status.free_size,
+ (unsigned long) mem_heap_status.num_locks,
+ (unsigned long) mem_heap_status.num_locked,
+ (unsigned long) mem_heap_status.num_nodes);
+
+ return buf;
+}
+
+/*
+ * Print memory heap status on file
+ * (Use like "cat /debug/b2r2_mem/stats")
+ */
+static int debugfs_b2r2_mem_stats_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_heap *mem_heap = filp->f_dentry->d_inode->i_private;
+ char Buf[400];
+ size_t dev_size;
+ int ret = 0;
+
+ get_b2r2_mem_stats(mem_heap, Buf);
+ dev_size = strlen(Buf);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for the "stats" file */
+static const struct file_operations debugfs_b2r2_mem_stats_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_stats_read,
+};
+
+/* read function for file in the "blocks" sub directory */
+static int debugfs_b2r2_mem_block_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_block *mem_block = filp->f_dentry->d_inode->i_private;
+ char Buf[200];
+ size_t dev_size;
+ int ret = 0;
+
+ dev_size = sprintf(Buf, "offset: %08lX %s size: %8d "
+ "lock_count: %2d\n",
+ (unsigned long) mem_block->offset,
+ mem_block->free ? "free" : "allc",
+ mem_block->size,
+ mem_block->lock_count);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for files in the "blocks" directory */
+static const struct file_operations debugfs_b2r2_mem_block_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_block_read,
+};
+
+/*
+ * Create or update the debugfs directory entry for a file in the
+ * "blocks" directory (a memory allocation)
+ */
+void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block,
+ struct dentry *parent)
+{
+ struct timespec tm = current_kernel_time();
+ struct timespec atime = tm;
+ struct timespec mtime = tm;
+ struct timespec ctime = tm;
+
+ if (mem_block->debugfs_block) {
+ atime = mem_block->debugfs_block->d_inode->i_atime;
+ ctime = mem_block->debugfs_block->d_inode->i_ctime;
+ debugfs_remove(mem_block->debugfs_block);
+ }
+
+ /* Add the block in debugfs */
+ if (mem_block->free)
+ sprintf(mem_block->debugfs_fname, "%08lX free",
+ (unsigned long) mem_block->offset);
+ else {
+ sprintf(mem_block->debugfs_fname, "%08lX allc h:%08lX "
+ "lck:%d ",
+ (unsigned long) mem_block->offset,
+ (unsigned long) mem_block,
+ mem_block->lock_count);
+ }
+
+ mem_block->debugfs_block = debugfs_create_file(
+ mem_block->debugfs_fname,
+ 0444, parent, mem_block,
+ &debugfs_b2r2_mem_block_fops);
+ if (mem_block->debugfs_block) {
+ mem_block->debugfs_block->d_inode->i_size = mem_block->size;
+ mem_block->debugfs_block->d_inode->i_atime = atime;
+ mem_block->debugfs_block->d_inode->i_mtime = mtime;
+ mem_block->debugfs_block->d_inode->i_ctime = ctime;
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module initialization function */
+int b2r2_mem_init(struct device *dev, u32 heap_size, u32 align, u32 node_size)
+{
+ struct b2r2_mem_block *mem_block;
+ u32 aligned_size;
+
+ printk(KERN_INFO "B2R2_MEM: Creating heap for size %d bytes\n",
+ (int) heap_size);
+
+ /* Align size */
+ aligned_size = align_down(align, heap_size);
+ if (aligned_size == 0)
+ return -EINVAL;
+
+ mem_heap = kzalloc(sizeof(struct b2r2_mem_heap), GFP_KERNEL);
+ if (!mem_heap)
+ return -ENOMEM;
+
+ mem_heap->start_virt_ptr = dma_alloc_coherent(dev,
+ aligned_size, &(mem_heap->start_phys_addr), GFP_KERNEL);
+ if (!mem_heap->start_phys_addr || !mem_heap->start_virt_ptr) {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ kfree(mem_heap);
+ return -ENOMEM;
+ }
+
+ /* Initialize the heap */
+ mem_heap->dev = dev;
+ mem_heap->size = aligned_size;
+ mem_heap->align = align;
+
+ INIT_LIST_HEAD(&mem_heap->blocks);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debugfs */
+
+ debugfs_root_dir = debugfs_create_dir("b2r2_mem", NULL);
+
+ mem_heap->debugfs_sub_root_dir = debugfs_create_dir("b2r2_mem",
+ debugfs_root_dir);
+ mem_heap->debugfs_heap_stats = debugfs_create_file(
+ "stats", 0444, mem_heap->debugfs_sub_root_dir, mem_heap,
+ &debugfs_b2r2_mem_stats_fops);
+ mem_heap->debugfs_dir_blocks = debugfs_create_dir(
+ "blocks", mem_heap->debugfs_sub_root_dir);
+#endif
+
+ /* Create the first _free_ memory block */
+ mem_block = b2r2_mem_block_alloc(0, aligned_size, true);
+ if (!mem_block) {
+ dma_free_coherent(dev, aligned_size,
+ mem_heap->start_virt_ptr,
+ mem_heap->start_phys_addr);
+ kfree(mem_heap);
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Add the free block to the blocks list */
+ list_add(&mem_block->list, &mem_heap->blocks);
+
+ /* Allocate separate heap for B2R2 nodes */
+ mem_heap->node_size = node_size;
+ mem_heap->node_heap = dma_pool_create("b2r2_node_cache",
+ dev, node_size, align, 4096);
+ if (!mem_heap->node_heap) {
+ b2r2_mem_block_free(mem_block);
+ dma_free_coherent(dev, aligned_size,
+ mem_heap->start_virt_ptr,
+ mem_heap->start_phys_addr);
+ kfree(mem_heap);
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_init);
+
+/* Module exit function */
+void b2r2_mem_exit(void)
+{
+ struct list_head *ptr;
+
+ /* Free B2R2 node heap */
+ dma_pool_destroy(mem_heap->node_heap);
+
+#ifdef CONFIG_DEBUG_FS
+ /* debugfs root dir */
+ if (debugfs_root_dir) {
+ debugfs_remove_recursive(debugfs_root_dir);
+ debugfs_root_dir = NULL;
+ }
+#endif
+
+ list_for_each(ptr, &mem_heap->blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ b2r2_mem_block_free(mem_block);
+ }
+
+ dma_free_coherent(mem_heap->dev, mem_heap->size,
+ mem_heap->start_virt_ptr,
+ mem_heap->start_phys_addr);
+ kfree(mem_heap);
+}
+EXPORT_SYMBOL(b2r2_mem_exit);
+
+/* Return status of the heap */
+int b2r2_mem_heap_status(struct b2r2_mem_heap_status *mem_heap_status)
+{
+ struct list_head *ptr;
+
+ if (!mem_heap || !mem_heap_status)
+ return -EINVAL;
+ memset(mem_heap_status, 0, sizeof(*mem_heap_status));
+
+ /* Lock the heap */
+ spin_lock(&mem_heap->heap_lock);
+
+ /* Fill in static info */
+ mem_heap_status->start_phys_addr = mem_heap->start_phys_addr;
+ mem_heap_status->size = mem_heap->size;
+ mem_heap_status->align = mem_heap->align;
+
+ list_for_each(ptr, &mem_heap->blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free) {
+ mem_heap_status->num_free++;
+ mem_heap_status->free_size += mem_block->size;
+ } else {
+ if (mem_block->lock_count) {
+ mem_heap_status->num_locked++;
+ mem_heap_status->num_locks +=
+ mem_block->lock_count;
+ }
+ mem_heap_status->num_alloc++;
+ mem_heap_status->allocated_size += mem_block->size;
+ }
+ }
+
+ spin_unlock(&mem_heap->heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_heap_status);
+
+/* Internal: Allocate a housekeeping structure
+ * for an allocated or free memory block
+ */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ u32 offset, u32 size, bool free)
+{
+ struct b2r2_mem_block *mem_block = kmalloc(
+ sizeof(struct b2r2_mem_block), GFP_KERNEL);
+
+ if (mem_block) {
+ mem_block->offset = offset;
+ mem_block->size = size;
+ mem_block->free = free;
+ mem_block->lock_count = 0;
+
+ INIT_LIST_HEAD(&mem_block->list);
+
+#ifdef CONFIG_DEBUG_FS
+ mem_block->debugfs_block = NULL;
+ /* Add the block in debugfs */
+ debugfs_create_mem_block_entry(mem_block,
+ mem_heap->debugfs_dir_blocks);
+#endif
+ }
+
+ return mem_block;
+}
+
+/* Internal: Release housekeeping structure */
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block)
+{
+ if (mem_block) {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(mem_block->debugfs_block);
+#endif
+ kfree(mem_block);
+ }
+}
+
+/* Allocate a block from the heap */
+int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle)
+{
+ int ret = 0;
+ struct list_head *ptr;
+ struct b2r2_mem_block *found_mem_block = NULL;
+ u32 aligned_size;
+
+ if (!mem_handle)
+ return -EINVAL;
+
+ printk(KERN_INFO "%s: size=%d\n", __func__, requested_size);
+
+ *mem_handle = 0;
+ if (!mem_heap)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&mem_heap->heap_lock);
+
+ aligned_size = align_up(mem_heap->align, requested_size);
+ /* Try to find the best matching free block of suitable size */
+ list_for_each(ptr, &mem_heap->blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free && mem_block->size >= aligned_size &&
+ (!found_mem_block ||
+ mem_block->size < found_mem_block->size)) {
+ found_mem_block = mem_block;
+ if (found_mem_block->size == aligned_size)
+ break;
+ }
+ }
+
+ if (found_mem_block) {
+ struct b2r2_mem_block *new_block
+ = b2r2_mem_block_alloc(found_mem_block->offset,
+ requested_size, false);
+
+ if (new_block) {
+ /* Insert the new block before the found block */
+ list_add_tail(&new_block->list,
+ &found_mem_block->list);
+
+ /* Split the free block */
+ found_mem_block->offset += aligned_size;
+ found_mem_block->size -= aligned_size;
+
+ if (found_mem_block->size == 0)
+ b2r2_mem_block_free(found_mem_block);
+ else {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(
+ found_mem_block,
+ mem_heap->debugfs_dir_blocks);
+#endif
+ }
+
+ *mem_handle = (u32) new_block;
+ *returned_size = aligned_size;
+ } else {
+ ret = -ENOMEM;
+ }
+ } else
+ ret = -ENOMEM;
+
+ if (ret != 0) {
+ *returned_size = 0;
+ *mem_handle = (u32) 0;
+ }
+
+ /* Unlock */
+ spin_unlock(&mem_heap->heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_alloc);
+
+/* Free the allocated block */
+int b2r2_mem_free(u32 mem_handle)
+{
+ int ret = 0;
+ struct b2r2_mem_block *mem_block = (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&mem_heap->heap_lock);
+
+ if (!ret && mem_block->free)
+ ret = -EINVAL;
+
+ if (!ret) {
+ printk(KERN_INFO "%s: freeing block 0x%p\n", __func__, mem_block);
+ /* Release the block */
+
+ mem_block->free = true;
+ mem_block->size = align_up(mem_heap->align,
+ mem_block->size);
+
+ /* Join with previous block if possible */
+ if (mem_block->list.prev != &mem_heap->blocks) {
+ struct b2r2_mem_block *prev_block =
+ list_entry(mem_block->list.prev,
+ struct b2r2_mem_block, list);
+
+ if (prev_block->free &&
+ (prev_block->offset + prev_block->size) ==
+ mem_block->offset) {
+ mem_block->offset = prev_block->offset;
+ mem_block->size += prev_block->size;
+
+ b2r2_mem_block_free(prev_block);
+ }
+ }
+
+ /* Join with next block if possible */
+ if (mem_block->list.next != &mem_heap->blocks) {
+ struct b2r2_mem_block *next_block
+ = list_entry(mem_block->list.next,
+ struct b2r2_mem_block,
+ list);
+
+ if (next_block->free &&
+ (mem_block->offset + mem_block->size) ==
+ next_block->offset) {
+ mem_block->size += next_block->size;
+
+ b2r2_mem_block_free(next_block);
+ }
+ }
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ mem_heap->debugfs_dir_blocks);
+#endif
+ }
+
+ /* Unlock */
+ spin_unlock(&mem_heap->heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_free);
+
+/* Lock the allocated block in memory */
+int b2r2_mem_lock(u32 mem_handle, u32 *phys_addr, void **virt_ptr, u32 *size)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&mem_heap->heap_lock);
+
+ mem_block->lock_count++;
+
+ if (phys_addr)
+ *phys_addr = mem_heap->start_phys_addr + mem_block->offset;
+ if (virt_ptr)
+ *virt_ptr = (char *) mem_heap->start_virt_ptr +
+ mem_block->offset;
+ if (size)
+ *size = align_up(mem_heap->align, mem_block->size);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ mem_heap->debugfs_dir_blocks);
+#endif
+
+ spin_unlock(&mem_heap->heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_lock);
+
+/* Unlock the allocated block in memory */
+int b2r2_mem_unlock(u32 mem_handle)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&mem_heap->heap_lock);
+
+ mem_block->lock_count--;
+
+ spin_unlock(&mem_heap->heap_lock);
+
+ /* debugfs will be updated in release */
+ return 0;
+/* return b2r2_mem_free(mem_handle);*/
+}
+EXPORT_SYMBOL(b2r2_mem_unlock);
+
+/* Allocate one or more b2r2 nodes from DMA pool */
+int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node)
+{
+ int i;
+ int ret = 0;
+ u32 physical_address;
+ struct b2r2_node *first_node_ptr;
+ struct b2r2_node *node_ptr;
+
+ /* Check input parameters */
+ if ((num_nodes <= 0) || !first_node) {
+ printk(KERN_ERR
+ "B2R2_MEM: Invalid parameter for b2r2_node_alloc, "
+ "num_nodes=%d, first_node=%ld\n",
+ (int) num_nodes, (long) first_node);
+ return -EINVAL;
+ }
+
+ /* Allocate the first node */
+ first_node_ptr = dma_pool_alloc(mem_heap->node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (first_node_ptr) {
+ /* Initialize first node */
+ first_node_ptr->next = NULL;
+ first_node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+
+ /* Allocate and initialize remaining nodes, */
+ /* and link them into a list */
+ for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) {
+ node_ptr->next = dma_pool_alloc(mem_heap->node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (node_ptr->next) {
+ node_ptr = node_ptr->next;
+ node_ptr->next = NULL;
+ node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+ } else {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /* If all nodes were allocated successfully, */
+ /* return the first node */
+ if (!ret)
+ *first_node = first_node_ptr;
+ else
+ b2r2_node_free(first_node_ptr);
+ } else {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_node_alloc);
+
+/* Free a linked list of b2r2 nodes */
+void b2r2_node_free(struct b2r2_node *first_node)
+{
+ struct b2r2_node *current_node = first_node;
+ struct b2r2_node *next_node = NULL;
+
+ /* Traverse the linked list and free the nodes */
+ while (current_node != NULL) {
+ next_node = current_node->next;
+ dma_pool_free(mem_heap->node_heap, current_node,
+ current_node->physical_address -
+ offsetof(struct b2r2_node, node));
+ current_node = next_node;
+ }
+}
+EXPORT_SYMBOL(b2r2_node_free);
+
+MODULE_AUTHOR("Robert Lind <robert.lind@ericsson.com>");
+MODULE_DESCRIPTION("Ericsson AB B2R2 physical memory driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.h b/drivers/video/b2r2/b2r2_mem_alloc.h
new file mode 100644
index 00000000000..33309c972f5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_MEM_ALLOC_H
+#define __B2R2_MEM_ALLOC_H
+
+#include "b2r2_internal.h"
+
+
+/**
+ * struct b2r2_mem_heap_status - Information about current state of the heap
+ * @start_phys_addr: Physical address of the memory area
+ * @size: Size of the memory area
+ * @align: Alignment of start and allocation sizes (in bytes).
+ * @num_alloc: Number of memory allocations
+ * @allocated_size: Size allocated (sum of requested sizes)
+ * @num_free: Number of free blocks (fragments)
+ * @free_size: Free size available for allocation
+ * @num_locks: Total number of locks held on memory allocations
+ * @num_locked: Number of locked memory allocations
+ * @num_nodes: Number of node allocations
+ *
+ **/
+struct b2r2_mem_heap_status {
+ u32 start_phys_addr;
+ u32 size;
+ u32 align;
+ u32 num_alloc;
+ u32 allocated_size;
+ u32 num_free;
+ u32 free_size;
+ u32 num_locks;
+ u32 num_locked;
+ u32 num_nodes;
+};
+
+
+/* B2R2 memory API (kernel) */
+
+/**
+ * b2r2_mem_init() - Initializes the B2R2 memory manager
+ * @dev: Pointer to device to use for allocating the memory heap
+ * @heap_size: Size of the heap (in bytes)
+ * @align: Alignment to use for memory allocations on heap (in bytes)
+ * @node_size: Size of each B2R2 node (in bytes)
+ *
+ * Returns 0 if success, else negative error code
+ **/
+int b2r2_mem_init(struct device *dev, u32 heap_size, u32 align, u32 node_size);
+
+/**
+ * b2r2_mem_exit() - Cleans up the B2R2 memory manager
+ *
+ **/
+void b2r2_mem_exit(void);
+
+/**
+ * b2r2_mem_heap_status() - Get information about the current heap state
+ * @mem_heap_status: Struct containing status info on successful return
+ *
+ * Returns 0 if success, else negative error code
+ **/
+int b2r2_mem_heap_status(struct b2r2_mem_heap_status *mem_heap_status);
+
+/**
+ * b2r2_mem_alloc() - Allocates memory block from physical memory heap
+ * @requested_size: Requested size
+ * @returned_size: Actual size of memory block. Might be adjusted due to
+ * alignment but is always >= requested size if function
+ * succeeds
+ * @mem_handle: Returned memory handle
+ *
+ * All memory allocations are movable when not locked.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle);
+
+/**
+ * b2r2_mem_free() - Frees an allocation
+ * @mem_handle: Memory handle
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_free(u32 mem_handle);
+
+/**
+ * b2r2_mem_lock() - Lock an allocated block in memory and return its physical address
+ * @mem_handle: Memory handle
+ * @phys_addr: Returned physical address to start of memory allocation.
+ * May be NULL.
+ * @virt_ptr: Returned virtual address pointer to start of memory allocation.
+ * May be NULL.
+ * @size: Returned size of memory allocation. May be NULL.
+ *
+ * The memory allocation is locked in place and its physical address
+ * is returned.
+ * The lock count is incremented by one.
+ * You need to call b2r2_mem_unlock once for each call to
+ * b2r2_mem_lock.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_lock(u32 mem_handle, u32 *phys_addr, void **virt_ptr, u32 *size);
+
+/**
+ * b2r2_mem_unlock() - Unlock previously locked memory
+ * @mem_handle: Memory handle
+ *
+ * Decrements lock count. When lock count reaches 0 the
+ * memory area is movable again.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_unlock(u32 mem_handle);
+
+/**
+ * b2r2_node_alloc() - Allocates one or more linked B2R2 nodes from the node pool
+ * @num_nodes: Number of linked nodes to allocate
+ * @first_node: Returned pointer to first node in linked list
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node);
+
+/**
+ * b2r2_node_free() - Frees a linked list of allocated B2R2 nodes
+ * @first_node: Pointer to first node in linked list
+ **/
+void b2r2_node_free(struct b2r2_node *first_node);
+
+
+#endif /* __B2R2_MEM_ALLOC_H */
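The header above only declares the allocator API. Below is a minimal usage sketch of the intended alloc/lock/use/unlock/free sequence; the device pointer, heap size, alignment and error handling are illustrative assumptions, not values taken from the driver.

#include <linux/device.h>
#include <linux/sizes.h>
#include "b2r2_internal.h"
#include "b2r2_mem_alloc.h"

/* Minimal usage sketch of the B2R2 memory API (assumed parameter values) */
static int b2r2_mem_usage_sketch(struct device *dev)
{
	u32 handle;
	u32 actual_size;
	u32 phys;
	void *virt;
	u32 size;
	int ret;

	/* Assumed parameters: 1 MB heap, 64-byte alignment */
	ret = b2r2_mem_init(dev, SZ_1M, 64, sizeof(struct b2r2_node));
	if (ret < 0)
		return ret;

	ret = b2r2_mem_alloc(4096, &actual_size, &handle);
	if (ret < 0)
		goto exit;

	/* Locking pins the block and yields its physical and virtual addresses */
	ret = b2r2_mem_lock(handle, &phys, &virt, &size);
	if (ret == 0) {
		/* ... program the hardware with 'phys', touch data via 'virt' ... */
		b2r2_mem_unlock(handle);
	}

	b2r2_mem_free(handle);
exit:
	b2r2_mem_exit();
	return ret;
}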
diff --git a/drivers/video/b2r2/b2r2_node_gen.c b/drivers/video/b2r2/b2r2_node_gen.c
new file mode 100644
index 00000000000..f452a4b11df
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_gen.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node generator
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include "b2r2_internal.h"
+
+static void free_nodes(struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ int no_of_nodes = 0;
+
+ while (node) {
+ no_of_nodes++;
+ node = node->next;
+ }
+
+ dma_free_coherent(b2r2_blt_device(),
+ no_of_nodes * sizeof(struct b2r2_node),
+ first_node,
+ first_node->physical_address -
+ offsetof(struct b2r2_node, node));
+}
+
+struct b2r2_node *b2r2_blt_alloc_nodes(int no_of_nodes)
+{
+ u32 physical_address;
+ struct b2r2_node *nodes;
+ struct b2r2_node *tmpnode;
+
+ if (no_of_nodes <= 0) {
+ dev_err(b2r2_blt_device(), "%s: Wrong number of nodes (%d)",
+ __func__, no_of_nodes);
+ return NULL;
+ }
+
+ /* Allocate the memory */
+ nodes = (struct b2r2_node *) dma_alloc_coherent(b2r2_blt_device(),
+ no_of_nodes * sizeof(struct b2r2_node),
+ &physical_address, GFP_DMA | GFP_KERNEL);
+
+ if (nodes == NULL) {
+ dev_err(b2r2_blt_device(),
+ "%s: Failed to alloc memory for nodes",
+ __func__);
+ return NULL;
+ }
+
+ /* Build the linked list */
+ tmpnode = nodes;
+ physical_address += offsetof(struct b2r2_node, node);
+ while (no_of_nodes--) {
+ tmpnode->physical_address = physical_address;
+ if (no_of_nodes)
+ tmpnode->next = tmpnode + 1;
+ else
+ tmpnode->next = NULL;
+
+ tmpnode++;
+ physical_address += sizeof(struct b2r2_node);
+ }
+
+ return nodes;
+}
+
+void b2r2_blt_free_nodes(struct b2r2_node *first_node)
+{
+ free_nodes(first_node);
+}
+
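For readability, here is a hypothetical caller of the node generator above; the chain length and the pr_debug() output are illustrative only, and the prototypes are assumed to come from b2r2_internal.h.

#include <linux/errno.h>
#include <linux/printk.h>
#include "b2r2_internal.h"

/* Hypothetical caller: allocate a small node chain, walk it, free it */
static int node_gen_usage_sketch(void)
{
	struct b2r2_node *first;
	struct b2r2_node *node;

	first = b2r2_blt_alloc_nodes(4);
	if (first == NULL)
		return -ENOMEM;

	/* Each node carries the bus address of its hardware descriptor */
	for (node = first; node != NULL; node = node->next)
		pr_debug("node %p, physical_address=0x%08x\n",
			 node, node->physical_address);

	b2r2_blt_free_nodes(first);
	return 0;
}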
diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c
new file mode 100644
index 00000000000..556e5f69e07
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.c
@@ -0,0 +1,3513 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include "b2r2_node_split.h"
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+#include "b2r2_filters.h"
+
+#include <linux/kernel.h>
+
+/*
+ * Macros and constants
+ */
+#define ABS(x) ((x) < 0 ? -(x) : (x))
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#define INSTANCES_DEFAULT_SIZE 10
+#define INSTANCES_GROW_SIZE 5
+
+/*
+ * Internal types
+ */
+
+
+/*
+ * Global variables
+ */
+
+/**
+ * VMX values for different color space conversions
+ */
+static const u32 vmx_rgb_to_yuv[] = {
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_blt_yuv888[] = {
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_rgb[] = {
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_blt_yuv888_to_rgb[] = {
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_blt_yuv888[] = {
+ B2R2_VMX0_YUV_TO_BLT_YUV888,
+ B2R2_VMX1_YUV_TO_BLT_YUV888,
+ B2R2_VMX2_YUV_TO_BLT_YUV888,
+ B2R2_VMX3_YUV_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yuv[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YUV,
+ B2R2_VMX1_BLT_YUV888_TO_YUV,
+ B2R2_VMX2_BLT_YUV888_TO_YUV,
+ B2R2_VMX3_BLT_YUV888_TO_YUV,
+};
+
+static const u32 vmx_yvu_to_blt_yuv888[] = {
+ B2R2_VMX0_YVU_TO_BLT_YUV888,
+ B2R2_VMX1_YVU_TO_BLT_YUV888,
+ B2R2_VMX2_YVU_TO_BLT_YUV888,
+ B2R2_VMX3_YVU_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yvu[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YVU,
+ B2R2_VMX1_BLT_YUV888_TO_YVU,
+ B2R2_VMX2_BLT_YUV888_TO_YVU,
+ B2R2_VMX3_BLT_YUV888_TO_YVU,
+};
+
+static const u32 vmx_yvu_to_rgb[] = {
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_yvu[] = {
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_bgr[] = {
+ B2R2_VMX0_RGB_TO_BGR,
+ B2R2_VMX1_RGB_TO_BGR,
+ B2R2_VMX2_RGB_TO_BGR,
+ B2R2_VMX3_RGB_TO_BGR,
+};
+
+static const u32 vmx_bgr_to_yuv[] = {
+ B2R2_VMX0_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX1_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX2_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX3_BGR_TO_YUV_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_bgr[] = {
+ B2R2_VMX0_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX1_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX2_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX3_YUV_TO_BGR_601_VIDEO,
+};
+
+static const u32 vmx_bgr_to_yvu[] = {
+ B2R2_VMX0_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX1_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX2_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX3_BGR_TO_YVU_601_VIDEO,
+};
+
+static const u32 vmx_yvu_to_bgr[] = {
+ B2R2_VMX0_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX1_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX2_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX3_YVU_TO_BGR_601_VIDEO,
+};
+
+static const u32 vmx_yvu_to_yuv[] = {
+ B2R2_VMX0_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX1_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX2_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX3_YVU_TO_YUV_601_VIDEO,
+};
+
+/*
+ * Forward declaration of private functions
+ */
+
+static int analyze_fmt_conv(struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count);
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count);
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scale_factors(struct b2r2_node_split_job *this);
+
+static void configure_src(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx);
+static int configure_dst(struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_blend(struct b2r2_node *node, u32 flags, u32 global_alpha,
+ bool swap_fg_bg);
+static void configure_clip(struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect);
+
+static int configure_tile(struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next);
+static void configure_direct_fill(struct b2r2_node *node, u32 color,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next);
+static int configure_fill(struct b2r2_node *node, u32 color,
+ enum b2r2_blt_fmt fmt, struct b2r2_node_split_buf *dst,
+ const u32 *ivmx, struct b2r2_node **next);
+static void configure_direct_copy(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next);
+static int configure_copy(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rotate(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_scale(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rot_scale(struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next);
+
+static int check_rect(const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip);
+static void set_buf(struct b2r2_node_split_buf *buf, u32 addr,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect, bool color_fill, u32 color);
+static int setup_tmp_buf(struct b2r2_node_split_buf *this, u32 max_size,
+ enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height);
+
+static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt);
+static u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color);
+static u8 get_alpha(enum b2r2_blt_fmt fmt, u32 pixel);
+static bool fmt_has_alpha(enum b2r2_blt_fmt fmt);
+
+static bool is_rgb_fmt(enum b2r2_blt_fmt fmt);
+static bool is_bgr_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv420_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv422_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv444_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu420_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu422_fmt(enum b2r2_blt_fmt fmt);
+
+static int fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width);
+static enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt);
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt);
+static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt);
+
+static bool is_transform(const struct b2r2_blt_request *req);
+static int calculate_scale_factor(u32 from, u32 to, u16 *sf_out);
+static s32 rescale(s32 dim, u16 sf);
+static s32 inv_rescale(s32 dim, u16 sf);
+
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values);
+
+static void reset_nodes(struct b2r2_node *node);
+
+/*
+ * Public functions
+ */
+
+/**
+ * b2r2_node_split_analyze() - analyzes the request
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+ u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+ u32 *buf_count, struct b2r2_node_split_job *this)
+{
+ int ret;
+ bool color_fill;
+
+ b2r2_log_info("%s\n", __func__);
+
+ memset(this, 0, sizeof(*this));
+
+ /* Copy parameters */
+ this->flags = req->user_req.flags;
+ this->transform = req->user_req.transform;
+ this->max_buf_size = max_buf_size;
+ this->global_alpha = req->user_req.global_alpha;
+ this->buf_count = 0;
+ this->node_count = 0;
+
+ if (this->flags & B2R2_BLT_FLAG_BLUR) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Unsupported formats on src */
+ switch (req->user_req.src_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (is_bgr_fmt(req->user_req.dst_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on dst */
+ switch (req->user_req.dst_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (is_bgr_fmt(req->user_req.src_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if ((this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) &&
+ (is_yuv_fmt(req->user_req.src_img.fmt) ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_8_BIT_A8)) {
+ b2r2_log_warn("%s: Unsupported: source color keying with "
+ "YUV or pure alpha formats.\n",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if (this->flags & (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+ b2r2_log_warn("%s: Unsupported: source mask, "
+ "destination color keying.\n",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn("%s: Invalid request: no table specified "
+ "for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for color fill */
+ color_fill = (this->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ /* Configure the source and destination buffers */
+ set_buf(&this->src, req->src_resolved.physical_address,
+ &req->user_req.src_img, &req->user_req.src_rect,
+ color_fill, req->user_req.src_color);
+
+ set_buf(&this->dst, req->dst_resolved.physical_address,
+ &req->user_req.dst_img, &req->user_req.dst_rect, false,
+ 0);
+
+ b2r2_log_info("%s:\n"
+ "\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t"
+ "dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x,
+ this->src.rect.y, this->src.rect.width, this->src.rect.height,
+ this->dst.rect.x, this->dst.rect.y, this->dst.rect.width,
+ this->dst.rect.height);
+
+ if (this->flags & B2R2_BLT_FLAG_DITHER)
+ this->dst.dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ this->flag_param = req->user_req.src_color;
+
+ /* Check for blending */
+ if ((this->flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) &&
+ (this->global_alpha != 255))
+ this->blend = true;
+ else if (this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ this->blend = (color_fill && fmt_has_alpha(this->dst.fmt)) ||
+ fmt_has_alpha(this->src.fmt);
+
+ if (this->blend && this->src.type == B2R2_FMT_TYPE_PLANAR) {
+ b2r2_log_warn("%s: Unsupported: blend with planar source\n",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check for clipping */
+ this->clip = (this->flags &
+ B2R2_BLT_FLAG_DESTINATION_CLIP) != 0;
+ if (this->clip) {
+ s32 l = req->user_req.dst_clip_rect.x;
+ s32 r = l + req->user_req.dst_clip_rect.width;
+ s32 t = req->user_req.dst_clip_rect.y;
+ s32 b = t + req->user_req.dst_clip_rect.height;
+
+ /* Intersect the clip and buffer rects */
+ if (l < 0)
+ l = 0;
+ if (r > req->user_req.dst_img.width)
+ r = req->user_req.dst_img.width;
+ if (t < 0)
+ t = 0;
+ if (b > req->user_req.dst_img.height)
+ b = req->user_req.dst_img.height;
+
+ this->clip_rect.x = l;
+ this->clip_rect.y = t;
+ this->clip_rect.width = r - l;
+ this->clip_rect.height = b - t;
+ } else {
+ /* Set the clip rectangle to the buffer bounds */
+ this->clip_rect.x = 0;
+ this->clip_rect.y = 0;
+ this->clip_rect.width = req->user_req.dst_img.width;
+ this->clip_rect.height = req->user_req.dst_img.height;
+ }
+
+ /* Validate the destination */
+ ret = check_rect(&req->user_req.dst_img, &req->user_req.dst_rect,
+ &this->clip_rect);
+ if (ret < 0)
+ goto error;
+
+ /* Validate the source (if not color fill) */
+ if (!color_fill) {
+ ret = check_rect(&req->user_req.src_img,
+ &req->user_req.src_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Do the analysis depending on the type of operation */
+ if (color_fill) {
+ ret = analyze_color_fill(this, req, &this->node_count);
+ } else {
+
+ bool upsample;
+ bool downsample;
+
+ /*
+		 * YUV formats that are non-raster, non-YUV444 need to be
+ * up (or down) sampled using the resizer.
+ *
+ * NOTE: The resizer needs to be enabled for YUV444 as well,
+ * even though there is no upsampling. This is most
+ * likely a bug in the hardware.
+ */
+ upsample = this->src.type != B2R2_FMT_TYPE_RASTER &&
+ is_yuv_fmt(this->src.fmt);
+ downsample = this->dst.type != B2R2_FMT_TYPE_RASTER &&
+ is_yuv_fmt(this->dst.fmt);
+
+ if (is_transform(req) || upsample || downsample)
+ ret = analyze_transform(this, req, &this->node_count,
+ &this->buf_count);
+ else
+ ret = analyze_copy(this, req, &this->node_count,
+ &this->buf_count);
+ }
+
+ if (ret == -ENOSYS) {
+ goto unsupported;
+ } else if (ret < 0) {
+ b2r2_log_warn("%s: Analysis failed!\n", __func__);
+ goto error;
+ }
+
+ /* Setup the origin and movement of the destination window */
+ if (this->dst.hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ this->dst.dx = -this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x + this->dst.rect.width - 1;
+ } else {
+ this->dst.dx = this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x;
+ }
+ if (this->dst.vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ this->dst.dy = -this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y + this->dst.rect.height - 1;
+ } else {
+ this->dst.dy = this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y;
+ }
+
+ *buf_count = this->buf_count;
+ *node_count = this->node_count;
+
+ if (this->buf_count > 0)
+ *bufs = &this->work_bufs[0];
+
+ b2r2_log_info("%s: dst.win=(%d, %d, %d, %d), dst.dx=%d, dst.dy=%d\n",
+ __func__, this->dst.win.x, this->dst.win.y,
+ this->dst.win.width, this->dst.win.height, this->dst.dx,
+ this->dst.dy);
+ if (this->buf_count > 0)
+ b2r2_log_info("%s: buf_count=%d, buf_size=%d, node_count=%d\n",
+ __func__, *buf_count, bufs[0]->size, *node_count);
+ else
+ b2r2_log_info("%s: buf_count=%d, node_count=%d\n",
+ __func__, *buf_count, *node_count);
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * b2r2_node_split_configure() - configures the node list
+ */
+int b2r2_node_split_configure(struct b2r2_node_split_job *this,
+ struct b2r2_node *first)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node *node = first;
+
+ u32 x_pixels = 0;
+ u32 y_pixels = 0;
+
+ reset_nodes(node);
+
+ while (y_pixels < dst->rect.height) {
+ s32 dst_x = dst->win.x;
+ s32 dst_w = dst->win.width;
+
+ /* Clamp window height */
+ if (dst->win.height > dst->rect.height - y_pixels)
+ dst->win.height = dst->rect.height - y_pixels;
+
+ while (x_pixels < dst->rect.width) {
+
+ /* Clamp window width */
+ if (dst_w > dst->rect.width - x_pixels)
+ dst->win.width = dst->rect.width - x_pixels;
+
+ ret = configure_tile(this, node, &node);
+ if (ret < 0)
+ goto error;
+
+ dst->win.x += dst->dx;
+ x_pixels += max(dst->dx, -dst->dx);
+ b2r2_log_info("%s: x_pixels=%d\n", __func__, x_pixels);
+ }
+
+ dst->win.y += dst->dy;
+ y_pixels += max(dst->dy, -dst->dy);
+
+ dst->win.x = dst_x;
+ dst->win.width = dst_w;
+ x_pixels = 0;
+
+ b2r2_log_info("%s: y_pixels=%d\n", __func__, y_pixels);
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * b2r2_node_split_assign_buffers() - assigns temporary buffers to the node list
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
+ struct b2r2_node *first, struct b2r2_work_buf *bufs,
+ u32 buf_count)
+{
+ struct b2r2_node *node = first;
+
+ while (node != NULL) {
+ /* The indices are offset by one */
+ if (node->dst_tmp_index) {
+ BUG_ON(node->dst_tmp_index > buf_count);
+
+ b2r2_log_info("%s: assigning buf %d as dst\n",
+ __func__, node->dst_tmp_index);
+
+ node->node.GROUP1.B2R2_TBA =
+ bufs[node->dst_tmp_index - 1].phys_addr;
+ }
+ if (node->src_tmp_index) {
+ u32 addr = bufs[node->src_tmp_index - 1].phys_addr;
+
+ b2r2_log_info("%s: "
+ "assigning buf %d as src %d ", __func__,
+ node->src_tmp_index, node->src_index);
+
+ BUG_ON(node->src_tmp_index > buf_count);
+
+ switch (node->src_index) {
+ case 1:
+ b2r2_log_info("1\n");
+ node->node.GROUP3.B2R2_SBA = addr;
+ break;
+ case 2:
+ b2r2_log_info("2\n");
+ node->node.GROUP4.B2R2_SBA = addr;
+ break;
+ case 3:
+ b2r2_log_info("3\n");
+ node->node.GROUP5.B2R2_SBA = addr;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ }
+
+ b2r2_log_info("%s: tba=%p\tsba=%p\n", __func__,
+ (void *)node->node.GROUP1.B2R2_TBA,
+ (void *)node->node.GROUP4.B2R2_SBA);
+
+ node = node->next;
+ }
+
+ return 0;
+}
+
+/**
+ * b2r2_node_split_unassign_buffers() - releases temporary buffers
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_node_split_job *this,
+ struct b2r2_node *first)
+{
+ return;
+}
+
+/**
+ * b2r2_node_split_cancel() - cancels and releases a job instance
+ */
+void b2r2_node_split_cancel(struct b2r2_node_split_job *this)
+{
+ memset(this, 0, sizeof(*this));
+
+ return;
+}
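As a reading aid, the sketch below shows the calling order these public functions appear designed for. The node and work-buffer allocation between the analyze and configure steps is handled elsewhere in the driver and is only hinted at in the comments; the function name and parameters here are illustrative.

#include "b2r2_internal.h"
#include "b2r2_node_split.h"

/* Hypothetical call sequence (node/buffer allocation omitted) */
static int node_split_usage_sketch(struct b2r2_blt_request *req,
		struct b2r2_node *first_node, u32 max_buf_size)
{
	struct b2r2_node_split_job *job = &req->node_split_job;
	struct b2r2_work_buf *bufs = NULL;
	u32 node_count = 0;
	u32 buf_count = 0;
	int ret;

	/* 1. Work out how many nodes and temporary buffers are needed */
	ret = b2r2_node_split_analyze(req, max_buf_size, &node_count,
			&bufs, &buf_count, job);
	if (ret < 0)
		return ret;

	/*
	 * 2. The caller is expected to allocate 'node_count' nodes (linked
	 *    through first_node) and back the 'buf_count' work buffers with
	 *    physical memory before continuing.
	 */

	/* 3. Fill in the hardware node list tile by tile */
	ret = b2r2_node_split_configure(job, first_node);
	if (ret < 0)
		goto cancel;

	/* 4. Patch the temporary buffer addresses into the nodes */
	ret = b2r2_node_split_assign_buffers(job, first_node, bufs, buf_count);
	if (ret < 0)
		goto cancel;

	return 0;

cancel:
	b2r2_node_split_cancel(job);
	return ret;
}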
+
+/*
+ * Private functions
+ */
+
+static int check_rect(const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip)
+{
+ int ret;
+
+ s32 l, r, b, t;
+
+ /* Check rectangle dimensions*/
+ if ((rect->width <= 0) || (rect->height <= 0)) {
+ b2r2_log_warn("%s: "
+ "Illegal rect (%d, %d, %d, %d)\n",
+ __func__, rect->x, rect->y, rect->width, rect->height);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* If we are using clip we should only look at the intersection of the
+ rects */
+ if (clip) {
+ l = MAX(rect->x, clip->x);
+ t = MAX(rect->y, clip->y);
+ r = MIN(rect->x + rect->width, clip->x + clip->width);
+ b = MIN(rect->y + rect->height, clip->y + clip->height);
+ } else {
+ l = rect->x;
+ t = rect->y;
+ r = rect->x + rect->width;
+ b = rect->y + rect->height;
+ }
+
+	/* Check that the rect isn't outside the buffer */
+ if ((l < 0) || (t < 0) || (l >= img->width) || (t >= img->height)) {
+ b2r2_log_warn("%s: "
+ "rect origin outside buffer\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((r > img->width) || (b > img->height)) {
+ b2r2_log_warn("%s: "
+ "rect ends outside buffer\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/* Check that the intersected rectangle isn't empty */
+ if ((l == r) || (t == b)) {
+ b2r2_log_warn("%s: "
+ "rect is empty (width or height zero)\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_fmt_conv() - analyze the format conversions needed for a job
+ */
+static int analyze_fmt_conv(struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count)
+{
+ if (is_rgb_fmt(src->fmt)) {
+ if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_yvu[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ *vmx = &vmx_rgb_to_blt_yuv888[0];
+ else if (is_yuv_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_yuv[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_bgr[0];
+ } else if (is_yvu_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_rgb[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_bgr[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ *vmx = &vmx_yvu_to_blt_yuv888[0];
+ else if (is_yuv_fmt(dst->fmt) &&
+ !is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_yuv[0];
+ } else if (src->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ src->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ src->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ if (is_rgb_fmt(dst->fmt)) {
+ *vmx = &vmx_blt_yuv888_to_rgb[0];
+ } else if (is_yvu_fmt(dst->fmt)) {
+ *vmx = &vmx_blt_yuv888_to_yvu[0];
+ } else if (is_yuv_fmt(dst->fmt)) {
+ switch (dst->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* do nothing */
+ break;
+ default:
+ *vmx = &vmx_blt_yuv888_to_yuv[0];
+ break;
+ }
+ }
+ } else if (is_yuv_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_yuv_to_rgb[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_yuv_to_bgr[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ *vmx = &vmx_yuv_to_blt_yuv888[0];
+ else if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_yuv[0];
+ } else if (is_bgr_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_bgr[0];
+ else if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_bgr_to_yvu[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ BUG_ON(1);
+ else if (is_yuv_fmt(dst->fmt))
+ *vmx = &vmx_bgr_to_yuv[0];
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_RASTER) {
+ *node_count = 1;
+ } else if (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR) {
+ *node_count = 2;
+ } else if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ *node_count = 3;
+ } else {
+ /* That's strange... */
+ BUG_ON(1);
+ }
+
+ return 0;
+}
+
+/**
+ * analyze_color_fill() - analyze a color fill operation
+ */
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count)
+{
+ int ret;
+
+ /* Destination must be raster for raw fill to work */
+ if (this->dst.type != B2R2_FMT_TYPE_RASTER) {
+ b2r2_log_warn("%s: fill requires raster destination\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* We will try to fill the entire rectangle in one go */
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /* Check if this is a direct fill */
+ if ((!this->blend) && ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ARGB8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_AYUV8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_VUYA8888))) {
+ this->type = B2R2_DIRECT_FILL;
+
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+
+ /* The entire destination rectangle will be */
+ memcpy(&this->dst.win, &this->dst.rect,
+ sizeof(this->dst.win));
+ *node_count = 1;
+ } else {
+ this->type = B2R2_FILL;
+
+ /* Determine the fill color format */
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) {
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+ } else {
+ /* If the dst fmt is YUV the fill fmt will be as well */
+ if (is_yuv_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else if (is_rgb_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (is_bgr_fmt(this->dst.fmt)) {
+ /* Color will still be ARGB, we will translate
+ using IVMX (configured later) */
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn("%s: "
+ "Illegal destination format for fill",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ /* Also, B2R2 seems to ignore the pixel alpha value */
+ if (((this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ != 0) &&
+ ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)
+ == 0) && fmt_has_alpha(this->src.fmt)) {
+ u8 pixel_alpha = get_alpha(this->src.fmt,
+ this->src.color);
+ u32 new_global = pixel_alpha * this->global_alpha / 255;
+
+ this->global_alpha = (u8)new_global;
+
+ /* Set the pixel alpha to full opaque so we don't get
+			   any nasty surprises */
+ this->src.color = set_alpha(this->src.fmt, 0xFF,
+ this->src.color);
+ }
+
+ ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
+ node_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * analyze_transform() - analyze a transform operation (rescale, rotate, etc.)
+ */
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+
+ bool is_scaling;
+
+ b2r2_log_info("%s\n", __func__);
+
+ /*
+ * The transform enum is defined so that all rotation transforms are
+ * masked with the rotation flag
+ */
+ this->rotation = (this->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0;
+
+	/* B2R2 cannot rotate if the destination is non-raster or a 422R format */
+ if (this->rotation && (this->dst.type != B2R2_FMT_TYPE_RASTER ||
+ this->dst.fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ this->dst.fmt == B2R2_BLT_FMT_CB_Y_CR_Y)) {
+ b2r2_log_warn("%s: Unsupported operation "
+ "(rot && (!dst_raster || dst==422R))",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Flip the image by changing the scan order of the destination */
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ this->dst.hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Check for scaling */
+ if (this->rotation) {
+ is_scaling = (this->src.rect.width != this->dst.rect.height) ||
+ (this->src.rect.height != this->dst.rect.width);
+ } else {
+ is_scaling = (this->src.rect.width != this->dst.rect.width) ||
+ (this->src.rect.height != this->dst.rect.height);
+ }
+
+ /* Plane separated formats must be treated as scaling */
+ is_scaling = is_scaling ||
+ (this->src.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->src.type == B2R2_FMT_TYPE_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_PLANAR);
+
+ if (is_scaling && this->rotation && this->blend) {
+ /* TODO: This is unsupported. Fix it! */
+ b2r2_log_info("%s: Unsupported operation (rot+rescale+blend)\n",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check which type of transform */
+ if (is_scaling && this->rotation) {
+ ret = analyze_rot_scale(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (is_scaling) {
+ ret = analyze_scaling(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (this->rotation) {
+ ret = analyze_rotate(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else {
+ /* No additional nodes needed for a flip */
+ ret = analyze_copy(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ this->type = B2R2_FLIP;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: error!\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * analyze_copy() - analyze a copy operation
+ */
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ if (!this->blend &&
+ !(this->flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ (this->src.fmt == this->dst.fmt) &&
+ (this->src.type == B2R2_FMT_TYPE_RASTER) &&
+ (this->dst.rect.x >= this->clip_rect.x) &&
+ (this->dst.rect.y >= this->clip_rect.y) &&
+ (this->dst.rect.x + this->dst.rect.width <=
+ this->clip_rect.x + this->clip_rect.width) &&
+ (this->dst.rect.y + this->dst.rect.height <=
+ this->clip_rect.y + this->clip_rect.height)) {
+ this->type = B2R2_DIRECT_COPY;
+ *node_count = 1;
+ } else {
+ u32 copy_count;
+
+ this->type = B2R2_COPY;
+
+ ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
+ &copy_count);
+ if (ret < 0)
+ goto error;
+
+ *node_count = copy_count;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
+
+static int calc_rot_count(u32 width, u32 height)
+{
+ int count;
+
+ count = width / B2R2_ROTATE_MAX_WIDTH;
+ if (width % B2R2_ROTATE_MAX_WIDTH)
+ count++;
+ if (height > B2R2_ROTATE_MAX_WIDTH &&
+ height % B2R2_ROTATE_MAX_WIDTH)
+ count *= 2;
+
+ return count;
+}
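calc_rot_count() determines how many rotation tiles a stripe requires. A worked example is given below, assuming purely for illustration that B2R2_ROTATE_MAX_WIDTH were 16; the real value is defined elsewhere in the driver and is not shown in this hunk.

/*
 * Worked example (illustrative maximum rotate width of 16 pixels assumed):
 *
 *   calc_rot_count(40, 20):
 *     count = 40 / 16 = 2;  40 % 16 != 0  -> count = 3
 *     20 > 16 and 20 % 16 != 0            -> count = 6
 *
 * i.e. six rotation tiles would be needed for a 40x20 stripe.
 */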
+
+static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ u32 num_rows;
+ u32 num_cols;
+
+ u32 rot_count;
+ u32 rescale_count;
+
+ u32 nodes_per_rot;
+ u32 nodes_per_rescale;
+
+ u32 right_width;
+ u32 bottom_height;
+
+ const u32 *dummy_vmx;
+
+ b2r2_log_info("%s\n", __func__);
+
+ /* Calculate the desired tmp buffer size */
+ tmp->win.width = rescale(B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ tmp->win.width >>= 10;
+ tmp->win.width = min(tmp->win.width, dst->rect.height);
+ tmp->win.height = dst->rect.width;
+
+ setup_tmp_buf(tmp, this->max_buf_size, dst->fmt, tmp->win.width,
+ tmp->win.height);
+ tmp->tmp_buf_index = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ tmp->win.width = tmp->rect.width;
+ tmp->win.height = tmp->rect.height;
+
+ tmp->dither = dst->dither;
+ dst->dither = 0;
+
+ /* Update the dst window with the actual tmp buffer dimensions */
+ dst->win.width = tmp->win.height;
+ dst->win.height = tmp->win.width;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /*
+ * Calculate how many nodes are required to copy to and from the tmp
+ * buffer
+ */
+ ret = analyze_fmt_conv(src, tmp, &this->ivmx, &nodes_per_rescale);
+ if (ret < 0)
+ goto error;
+
+ /* We will not do any format conversion in the rotation stage */
+ ret = analyze_fmt_conv(tmp, dst, &dummy_vmx, &nodes_per_rot);
+ if (ret < 0)
+ goto error;
+
+ /* Calculate node count for the inner tiles */
+ num_cols = dst->rect.width / dst->win.width;
+ num_rows = dst->rect.height / dst->win.height;
+
+ rescale_count = num_cols * num_rows;
+ rot_count = calc_rot_count(dst->win.height, dst->win.width) *
+ num_cols * num_rows;
+
+ right_width = dst->rect.width % dst->win.width;
+ bottom_height = dst->rect.height % dst->win.height;
+
+ /* Calculate node count for the rightmost tiles */
+ if (right_width) {
+ u32 count = calc_rot_count(dst->win.height, right_width);
+
+ rot_count += count * num_rows;
+ rescale_count += num_rows;
+ b2r2_log_info("%s: rightmost: %d nodes\n", __func__,
+ count*num_rows);
+ }
+
+ /* Calculate node count for the bottom tiles */
+ if (bottom_height) {
+ u32 count = calc_rot_count(bottom_height, dst->win.width);
+
+ rot_count += count * num_cols;
+ rescale_count += num_cols;
+ b2r2_log_info("%s: bottom: %d nodes\n", __func__,
+ count * num_cols);
+
+ }
+
+ /* And finally for the bottom right corner */
+ if (right_width && bottom_height) {
+ u32 count = calc_rot_count(bottom_height, right_width);
+
+ rot_count += count;
+ rescale_count++;
+ b2r2_log_info("%s: bottom right: %d nodes\n", __func__, count);
+
+ }
+
+ *node_count = rot_count * nodes_per_rot;
+ *node_count += rescale_count * nodes_per_rescale;
+ *buf_count = 1;
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: error!\n", __func__);
+ return ret;
+}
+
+static int analyze_rot_scale_upscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+	/* TODO: When upscaling we should optimally do the rotation first... */
+ return analyze_rot_scale_downscale(this, req, node_count, buf_count);
+}
+
+/**
+ * analyze_rot_scale() - analyzes a combined rotation and scaling op
+ */
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+
+ bool upscale;
+
+ ret = analyze_scale_factors(this);
+ if (ret < 0)
+ goto error;
+
+ upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20);
+
+ if (upscale)
+ ret = analyze_rot_scale_upscale(this, req, node_count,
+ buf_count);
+ else
+ ret = analyze_rot_scale_downscale(this, req, node_count,
+ buf_count);
+
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_SCALE_AND_ROTATE;
+
+ return 0;
+
+error:
+ return ret;
+}
+
+/**
+ * analyze_scaling() - analyze a rescale operation
+ */
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+
+ u32 copy_count;
+ u32 nbr_cols;
+
+ s32 dst_w;
+
+ b2r2_log_info("%s\n", __func__);
+
+ ret = analyze_scale_factors(this);
+ if (ret < 0)
+ goto error;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
+ &copy_count);
+ if (ret < 0)
+ goto error;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /*
+ * We need to subtract from the actual maximum rescale width since the
+ * start of the stripe will be floored and the end ceiled. This could in
+ * some cases cause the stripe to be one pixel more than the maximum
+ * width.
+ *
+ * Example:
+ * x = 127.8, w = 127.8
+ *
+ * The stripe will touch pixels 127.8 through 255.6, i.e. 129 pixels.
+ */
+ dst_w = rescale(B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ if (dst_w < (1 << 10))
+ dst_w = 1;
+ else
+ dst_w >>= 10;
+
+ b2r2_log_info("%s: dst_w=%d dst.rect.width=%d\n", __func__, dst_w,
+ this->dst.rect.width);
+
+ this->dst.win.width = min(dst_w, this->dst.rect.width);
+
+ b2r2_log_info("%s: dst.win.width=%d\n", __func__, this->dst.win.width);
+
+ nbr_cols = this->dst.rect.width / this->dst.win.width;
+ if (this->dst.rect.width % this->dst.win.width)
+ nbr_cols++;
+
+ *node_count = copy_count * nbr_cols;
+
+ this->type = B2R2_SCALE;
+
+ b2r2_log_info("%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * analyze_rotate() - analyze a rotate operation
+ */
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+
+ u32 nodes_per_tile;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
+ &nodes_per_tile);
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_ROTATE;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ this->dst.win.height = min(this->dst.win.height, B2R2_ROTATE_MAX_WIDTH);
+
+ /*
+ * B2R2 cannot do rotations on stripes that are not a multiple of 16
+ * pixels high (if larger than 16 pixels).
+ */
+ if (this->dst.win.width > 16)
+ this->dst.win.width -= (this->dst.win.width % 16);
+
+ /* Blending cannot be combined with rotation */
+ if (this->blend) {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+ enum b2r2_blt_fmt tmp_fmt;
+
+ if (is_yuv_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ else if (is_bgr_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ else
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+
+ setup_tmp_buf(tmp, this->max_buf_size, tmp_fmt,
+ this->dst.win.width, this->dst.win.height);
+
+ tmp->tmp_buf_index = 1;
+
+ tmp->vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ this->dst.win.width = tmp->rect.width;
+ this->dst.win.height = tmp->rect.height;
+
+ memcpy(&tmp->win, &tmp->rect, sizeof(tmp->win));
+
+ *buf_count = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ /*
+ * One more node per tile is required to rotate to the temp
+ * buffer.
+ */
+ nodes_per_tile++;
+ }
+
+ /* Finally, calculate the node count */
+ *node_count = nodes_per_tile *
+ calc_rot_count(this->src.rect.width, this->src.rect.height);
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_scale_factors() - determines the scale factors for the op
+ */
+static int analyze_scale_factors(struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ u16 hsf;
+ u16 vsf;
+
+ if (this->rotation) {
+ ret = calculate_scale_factor(this->src.rect.width,
+ this->dst.rect.height, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(this->src.rect.height,
+ this->dst.rect.width, &vsf);
+ if (ret < 0)
+ goto error;
+ } else {
+ ret = calculate_scale_factor(this->src.rect.width,
+ this->dst.rect.width, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(this->src.rect.height,
+ this->dst.rect.height, &vsf);
+ if (ret < 0)
+ goto error;
+ }
+
+ this->h_rescale = hsf != (1 << 10);
+ this->v_rescale = vsf != (1 << 10);
+
+ this->h_rsf = hsf;
+ this->v_rsf = vsf;
+
+ b2r2_log_info("%s: h_rsf=%.4x\n", __func__, this->h_rsf);
+ b2r2_log_info("%s: v_rsf=%.4x\n", __func__, this->v_rsf);
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
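The scale factors computed above are fixed-point values with 10 fractional bits, so (1 << 10) == 0x0400 stands for a factor of exactly 1.0 and the h_rescale/v_rescale flags simply mark a non-unity factor. The helper below is an assumed, simplified illustration of that representation only; it is not the driver's calculate_scale_factor().

/* Illustrative .10 fixed-point helper (assumed, simplified) */
static u16 to_fixed_10(u32 numerator, u32 denominator)
{
	/* e.g. to_fixed_10(3, 2) == 0x0600, i.e. 1.5 in .10 fixed point */
	return (u16)((numerator << 10) / denominator);
}

With this representation, to_fixed_10(w, w) yields 0x0400, which is exactly the unity value the code above tests against.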
+
+/**
+ * configure_tile() - configures one tile of a blit operation
+ */
+static int configure_tile(struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+ int ret = 0;
+
+ struct b2r2_node *last;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+
+ struct b2r2_blt_rect dst_norm;
+ struct b2r2_blt_rect src_norm;
+
+ /* Normalize the dest coords to the dest rect coordinate space */
+ dst_norm.x = dst->win.x - dst->rect.x;
+ dst_norm.y = dst->win.y - dst->rect.y;
+ dst_norm.width = dst->win.width;
+ dst_norm.height = dst->win.height;
+
+ if (dst->vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ /* The y coord should be counted from the bottom */
+ dst_norm.y = dst->rect.height - (dst_norm.y + 1);
+ }
+ if (dst->hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ /* The x coord should be counted from the right */
+ dst_norm.x = dst->rect.width - (dst_norm.x + 1);
+ }
+
+ /* If the destination is rotated we should swap x, y */
+ if (this->rotation) {
+ src_norm.x = dst_norm.y;
+ src_norm.y = dst_norm.x;
+ src_norm.width = dst_norm.height;
+ src_norm.height = dst_norm.width;
+ } else {
+ src_norm.x = dst_norm.x;
+ src_norm.y = dst_norm.y;
+ src_norm.width = dst_norm.width;
+ src_norm.height = dst_norm.height;
+ }
+
+ /* Convert to src coordinate space */
+ src->win.x = src_norm.x + src->rect.x;
+ src->win.y = src_norm.y + src->rect.y;
+ src->win.width = src_norm.width;
+ src->win.height = src_norm.height;
+
+ /* Do the configuration depending on operation type */
+ switch (this->type) {
+ case B2R2_DIRECT_FILL:
+ configure_direct_fill(node, this->src.color, dst, &last);
+ break;
+
+ case B2R2_DIRECT_COPY:
+ configure_direct_copy(node, src, dst, &last);
+ break;
+
+ case B2R2_FILL:
+ ret = configure_fill(node, src->color, src->fmt,
+ dst, this->ivmx, &last);
+ break;
+
+ case B2R2_FLIP: /* FLIP is just a copy with different VSO/HSO */
+ case B2R2_COPY:
+ ret = configure_copy(node, src, dst, this->ivmx, &last, this);
+ break;
+
+ case B2R2_ROTATE:
+ {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ if (this->blend) {
+ b2r2_log_info("%s: rotation + blend\n",
+ __func__);
+
+ tmp->win.x = 0;
+ tmp->win.y = tmp->win.height - 1;
+ tmp->win.width = dst->win.width;
+ tmp->win.height = dst->win.height;
+
+ /* Rotate to the temp buf */
+ ret = configure_rotate(node, src, tmp,
+ this->ivmx, &node, NULL);
+ if (ret < 0)
+ goto error;
+
+ /* Then do a copy to the destination */
+ ret = configure_copy(node, tmp, dst, NULL,
+ &last, this);
+ } else {
+ /* Just do a rotation */
+ ret = configure_rotate(node, src, dst,
+ this->ivmx, &last, this);
+ }
+ }
+ break;
+
+ case B2R2_SCALE:
+ ret = configure_scale(node, src, dst, this->h_rsf, this->v_rsf,
+ this->ivmx, &last, this);
+ break;
+
+ case B2R2_SCALE_AND_ROTATE:
+ ret = configure_rot_scale(this, node, &last);
+ break;
+
+ default:
+ b2r2_log_warn("%s: Unsupported request\n", __func__);
+ ret = -ENOSYS;
+ goto error;
+ break;
+
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Scale and rotate will configure its own blending and clipping */
+ if (this->type != B2R2_SCALE_AND_ROTATE) {
+
+ /* Configure blending and clipping */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn("%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (this->blend)
+ configure_blend(node, this->flags,
+ this->global_alpha,
+ this->swap_fg_bg);
+ if (this->clip)
+ configure_clip(node, &this->clip_rect);
+
+ node = node->next;
+
+ } while (node != last);
+ }
+
+ /* Consume the nodes */
+ *next = last;
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Error!\n", __func__);
+ return ret;
+}
+
+/*
+ * configure_sub_rot() - configure a sub-rotation
+ *
+ * This function configures a set of nodes for rotation using the destination
+ * window instead of the rectangle for calculating tiles.
+ */
+static int configure_sub_rot(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *job)
+{
+ int ret;
+
+ struct b2r2_blt_rect src_win;
+ struct b2r2_blt_rect dst_win;
+
+ u32 y_pixels = 0;
+ u32 x_pixels = 0;
+
+ memcpy(&src_win, &src->win, sizeof(src_win));
+ memcpy(&dst_win, &dst->win, sizeof(dst_win));
+
+ b2r2_log_info("%s: src_win=(%d, %d, %d, %d) "
+ "dst_win=(%d, %d, %d, %d)\n", __func__,
+ src_win.x, src_win.y, src_win.width, src_win.height,
+ dst_win.x, dst_win.y, dst_win.width, dst_win.height);
+
+ dst->win.height = B2R2_ROTATE_MAX_WIDTH;
+ if (dst->win.width % B2R2_ROTATE_MAX_WIDTH)
+ dst->win.width -= dst->win.width % B2R2_ROTATE_MAX_WIDTH;
+
+ while (x_pixels < dst_win.width) {
+ u32 src_x = src->win.x;
+ u32 src_w = src->win.width;
+ u32 dst_y = dst->win.y;
+ u32 dst_h = dst->win.height;
+
+ dst->win.width = min(dst->win.width,
+ dst_win.width - (int)x_pixels);
+ src->win.height = dst->win.width;
+
+ b2r2_log_info("%s: x_pixels=%d\n", __func__, x_pixels);
+
+ while (y_pixels < dst_win.height) {
+ dst->win.height = min(dst->win.height,
+ dst_win.height - (int)y_pixels);
+ src->win.width = dst->win.height;
+
+ b2r2_log_info("%s: y_pixels=%d\n", __func__, y_pixels);
+
+ ret = configure_rotate(node, src, dst, ivmx, &node,
+ job);
+ if (ret < 0)
+ goto error;
+
+ src->win.x += (src->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ src->win.width : -src->win.width;
+ dst->win.y += (dst->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ dst->win.height : -dst->win.height;
+
+ y_pixels += dst->win.height;
+ }
+
+ src->win.x = src_x;
+ src->win.y += (src->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ src->win.height : -src->win.height;
+ src->win.width = src_w;
+
+ dst->win.x += (dst->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ dst->win.width : -dst->win.width;
+ dst->win.y = dst_y;
+ dst->win.height = dst_h;
+
+ x_pixels += dst->win.width;
+ y_pixels = 0;
+
+ }
+
+ memcpy(&src->win, &src_win, sizeof(src->win));
+ memcpy(&dst->win, &dst_win, sizeof(dst->win));
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_downscale() - configures a combined rotate and downscale
+ *
+ * When doing a downscale it is better to do the rotation last.
+ */
+static int configure_rot_downscale(struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ tmp->win.x = 0;
+ tmp->win.y = 0;
+ tmp->win.width = dst->win.height;
+ tmp->win.height = dst->win.width;
+
+ ret = configure_scale(node, src, tmp, this->h_rsf, this->v_rsf,
+ this->ivmx, &node, this);
+ if (ret < 0)
+ goto error;
+
+ ret = configure_sub_rot(node, tmp, dst, NULL, &node, this);
+ if (ret < 0)
+ goto error;
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_info("%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_upscale() - configures a combined rotate and upscale
+ *
+ * When doing an upscale it is better to do the rotation first.
+ */
+static int configure_rot_upscale(struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+	/* TODO: Implement an optimal upscale (rotation first) */
+ return configure_rot_downscale(this, node, next);
+}
+
+/**
+ * configure_rot_scale() - configures a combined rotation and scaling op
+ */
+static int configure_rot_scale(struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+ int ret;
+
+ bool upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 10);
+
+ if (upscale)
+ ret = configure_rot_upscale(this, node, next);
+ else
+ ret = configure_rot_downscale(this, node, next);
+
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_direct_fill() - configures the given node for direct fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_fill(struct b2r2_node *node, u32 color,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL | B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_FILL;
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Source setup */
+
+	/* It seems B2R2 checks that source and dest have the same format */
+ node->node.GROUP3.B2R2_STY = to_native_fmt(dst->fmt);
+ node->node.GROUP2.B2R2_S1CF = color;
+ node->node.GROUP2.B2R2_S2CF = 0;
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_direct_copy() - configures the node for direct copy
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_copy(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_COPY;
+
+ /* Source setup, use the base function to avoid altering the INS */
+ set_src(&node->node.GROUP3, src->addr, src);
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_fill() - configures the given node for color fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @fmt - the source color format
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * A normal fill operation can be combined with any other per-pixel operation
+ * such as blend.
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_fill(struct b2r2_node *node, u32 color,
+ enum b2r2_blt_fmt fmt, struct b2r2_node_split_buf *dst,
+ const u32 *ivmx, struct b2r2_node **next)
+{
+ int ret;
+ struct b2r2_node *last;
+
+ /* Configure the destination */
+ ret = configure_dst(node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn("%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_COLOR_FILL;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ /* B2R2 has a bug that disables color fill from S2. As a
+ workaround we use S1 for the color. */
+ node->node.GROUP2.B2R2_S1CF = 0;
+ node->node.GROUP2.B2R2_S2CF = color;
+
+ /* TO BE REMOVED: */
+ set_src_2(node, dst->addr, dst);
+ node->node.GROUP4.B2R2_STY = to_native_fmt(fmt);
+
+ /* Setup the iVMX for color conversion */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+
+ if ((dst->type == B2R2_FMT_TYPE_PLANAR) ||
+ (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR)) {
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP8.B2R2_FCTL =
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+ node->node.GROUP9.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP10.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP10.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_copy() - configures the given node for a copy operation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_copy(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_dst(node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ /* Configure the source for each node */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn("%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ if (this != NULL &&
+ (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ != 0) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(this->flag_param, src->fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ if (this != NULL &&
+ (this->flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ struct b2r2_blt_request *request =
+ container_of(this, struct b2r2_blt_request,
+ node_split_job);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO =
+ B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = request->clut_phys_addr;
+ }
+ /* Configure the source(s) */
+ configure_src(node, src, ivmx);
+
+ node = node->next;
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rotate() - configures the given node for rotation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required by the combination
+ * of rotating and writing the destination format.
+ */
+static int configure_rotate(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_copy(node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn("%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+
+ b2r2_log_debug("%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "-----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn("%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_scale() - configures the given node for scaling
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @h_rsf - the horizontal rescale factor
+ * @v_rsf - the vertical rescale factor
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ */
+static int configure_scale(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ struct b2r2_filter_spec *hf = NULL;
+ struct b2r2_filter_spec *vf = NULL;
+
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ u32 hsrc_init = 0;
+ u32 vsrc_init = 0;
+ u32 hfp = 0;
+ u32 vfp = 0;
+
+ u16 luma_h_rsf = h_rsf;
+ u16 luma_v_rsf = v_rsf;
+
+ struct b2r2_filter_spec *luma_hf = NULL;
+ struct b2r2_filter_spec *luma_vf = NULL;
+
+ u32 luma_fctl = 0;
+ u32 luma_rsf = 0;
+ u32 luma_rzi = 0;
+ u32 luma_hsrc_init = 0;
+ u32 luma_vsrc_init = 0;
+ u32 luma_hfp = 0;
+ u32 luma_vfp = 0;
+
+ s32 src_x;
+ s32 src_y;
+ s32 src_w;
+ s32 src_h;
+
+ bool upsample;
+ bool downsample;
+
+ struct b2r2_blt_rect tmp_win = src->win;
+
+ /* Rescale the normalized source window */
+ src_x = inv_rescale(src->win.x - src->rect.x, luma_h_rsf);
+ src_y = inv_rescale(src->win.y - src->rect.y, luma_v_rsf);
+ src_w = inv_rescale(src->win.width, luma_h_rsf);
+ src_h = inv_rescale(src->win.height, luma_v_rsf);
+
+ /* Convert to src coordinate space */
+ src->win.x = (src_x >> 10) + src->rect.x;
+ src->win.y = (src_y >> 10) + src->rect.y;
+
+ /*
+ * Since the stripe might start and end on a fractional pixel
+ * we need to count all the touched pixels in the width.
+ *
+ * Example:
+ * src_x = 1.8, src_w = 2.8
+ *
+ * The stripe touches pixels 1.8 through 4.6, i.e. 4 pixels
+ */
+ src->win.width = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src->win.height = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ luma_hsrc_init = src_x & 0x3ff;
+ luma_vsrc_init = src_y & 0x3ff;
+
+ /* Check for upsampling of chroma */
+ upsample = src->type != B2R2_FMT_TYPE_RASTER &&
+ !is_yuv444_fmt(src->fmt);
+ if (upsample) {
+ h_rsf /= 2;
+
+ if (is_yuv420_fmt(src->fmt))
+ v_rsf /= 2;
+ }
+
+ /* Check for downsampling of chroma */
+ downsample = dst->type != B2R2_FMT_TYPE_RASTER &&
+ !is_yuv444_fmt(dst->fmt);
+ if (downsample) {
+ h_rsf *= 2;
+
+ if (is_yuv420_fmt(dst->fmt))
+ v_rsf *= 2;
+ }
+
+ src_x = inv_rescale(tmp_win.x - src->rect.x, h_rsf);
+ src_y = inv_rescale(tmp_win.y - src->rect.y, v_rsf);
+ hsrc_init = src_x & 0x3ff;
+ vsrc_init = src_y & 0x3ff;
+
+ /* Configure resize and filters */
+ fctl = B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ luma_fctl = B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf = (h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+ luma_rsf = (luma_h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (luma_v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+
+ rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+ luma_rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * We should only filter if there is an actual rescale (i.e. not when
+ * up or downsampling).
+ */
+ if (luma_h_rsf != (1 << 10)) {
+ hf = b2r2_filter_find(h_rsf);
+ luma_hf = b2r2_filter_find(luma_h_rsf);
+ }
+ if (luma_v_rsf != (1 << 10)) {
+ vf = b2r2_filter_find(v_rsf);
+ luma_vf = b2r2_filter_find(luma_v_rsf);
+ }
+
+ if (hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ hfp = hf->h_coeffs_phys_addr;
+ }
+
+ if (vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ vfp = vf->v_coeffs_phys_addr;
+ }
+
+ if (luma_hf) {
+ luma_fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ luma_hfp = luma_hf->h_coeffs_phys_addr;
+ }
+
+ if (luma_vf) {
+ luma_fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ luma_vfp = luma_vf->v_coeffs_phys_addr;
+ }
+
+ ret = configure_copy(node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+ do {
+ bool chroma_rescale =
+ (h_rsf != (1 << 10)) || (v_rsf != (1 << 10));
+ bool luma_rescale =
+ (luma_h_rsf != (1 << 10)) || (luma_v_rsf != (1 << 10));
+ bool src_raster = src->type == B2R2_FMT_TYPE_RASTER;
+ bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER;
+ bool dst_chroma =
+ node->node.GROUP1.B2R2_TTY & B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (node == NULL) {
+ b2r2_log_warn("%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * If the source format is anything other than raster, we
+ * always have to enable both chroma and luma resizers. This
+ * could be a bug in the hardware, since it is not mentioned in
+ * the specification.
+ *
+ * Otherwise, we will only enable the chroma resizer when
+ * writing chroma and the luma resizer when writing luma
+ * (or both when writing raster). Also, if there is no rescale
+ * to be done there's no point in using the resizers.
+ */
+
+ if (!src_raster || (chroma_rescale &&
+ (dst_raster || dst_chroma))) {
+ /* Enable chroma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI = rzi;
+ node->node.GROUP9.B2R2_HFP = hfp;
+ node->node.GROUP9.B2R2_VFP = vfp;
+ }
+
+ if (!src_raster || (luma_rescale &&
+ (dst_raster || !dst_chroma))) {
+ /* Enable luma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;
+ node->node.GROUP8.B2R2_FCTL |= luma_fctl;
+
+ node->node.GROUP10.B2R2_RSF = luma_rsf;
+ node->node.GROUP10.B2R2_RZI = luma_rzi;
+ node->node.GROUP10.B2R2_HFP = luma_hfp;
+ node->node.GROUP10.B2R2_VFP = luma_vfp;
+ }
+
+ b2r2_log_info("%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_src() - configures the source registers and the iVMX
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @ivmx - the iVMX to use for color conversion
+ *
+ * This operation will not consume any nodes
+ */
+static void configure_src(struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx)
+{
+ struct b2r2_node_split_buf tmp_buf;
+
+ b2r2_log_info("%s: src.win=(%d, %d, %d, %d)\n", __func__,
+ src->win.x, src->win.y, src->win.width,
+ src->win.height);
+
+ /* Configure S1 - S3 */
+ switch (src->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ set_src_2(node, src->addr, src);
+ break;
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ /*
+ * For 420 and 422 the chroma has lower resolution than the
+ * luma
+ */
+ if (!is_yuv444_fmt(src->fmt)) {
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ if (is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src);
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ if (!is_yuv444_fmt(src->fmt)) {
+ /*
+ * Each chroma buffer will have half as many values
+ * per line as the luma buffer
+ */
+ tmp_buf.pitch = (tmp_buf.pitch + 1) / 2;
+
+ /* Horizontal resolution is half */
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well
+ */
+ if (is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src); /* Y */
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf); /* U */
+ set_src_1(node, tmp_buf.chroma_cr_addr, &tmp_buf); /* V */
+
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+
+ /* Configure the iVMX for color space conversions */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+}
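+
+/*
+ * Worked example (illustrative values only): for a YUV420 semi-planar
+ * source with win = (x=3, y=5, w=7, h=9), the chroma window passed to
+ * set_src_2() above becomes x=3>>1=1, y=5>>1=2, w=(7+1)/2=4 and
+ * h=(9+1)/2=5, i.e. the chroma plane is addressed at half resolution
+ * with the width and height rounded up so no chroma samples are lost.
+ */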
+
+/**
+ * configure_dst() - configures the destination registers of the given node
+ *
+ * @node - the node to configure
+ * @ivmx - the iVMX to use for color conversion
+ * @dst - the destination buffer
+ *
+ * This operation will consume as many nodes as are required to write the
+ * destination format.
+ */
+static int configure_dst(struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ int nbr_planes = 1;
+ int i;
+
+ struct b2r2_node_split_buf dst_planes[3];
+
+ b2r2_log_info("%s: dst.win=(%d, %d, %d, %d)\n", __func__,
+ dst->win.x, dst->win.y, dst->win.width,
+ dst->win.height);
+
+ memcpy(&dst_planes[0], dst, sizeof(dst_planes[0]));
+
+ if (dst->type != B2R2_FMT_TYPE_RASTER) {
+ /* There will be at least 2 planes */
+ nbr_planes = 2;
+
+ memcpy(&dst_planes[1], dst, sizeof(dst_planes[1]));
+
+ dst_planes[1].addr = dst->chroma_addr;
+ dst_planes[1].plane_selection = B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (!is_yuv444_fmt(dst->fmt)) {
+ /* Horizontal resolution is half */
+ dst_planes[1].win.x /= 2;
+ /*
+ * Must round up the chroma size to handle cases when
+ * luma size is not divisible by 2. E.g. luma width==7
+ * requires chroma width==4. Chroma width==7/2==3 is only
+ * enough for luma width==6.
+ */
+ dst_planes[1].win.width =
+ (dst_planes[1].win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well. Height must be rounded in
+ * the same way as is done for width.
+ */
+ if (is_yuv420_fmt(dst->fmt)) {
+ dst_planes[1].win.y /= 2;
+ dst_planes[1].win.height =
+ (dst_planes[1].win.height + 1) / 2;
+ }
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ /* There will be a third plane as well */
+ nbr_planes = 3;
+
+ if (!is_yuv444_fmt(dst->fmt)) {
+ /* The chroma planes have half the luma pitch */
+ dst_planes[1].pitch /= 2;
+ }
+
+ memcpy(&dst_planes[2], &dst_planes[1],
+ sizeof(dst_planes[2]));
+ dst_planes[2].addr = dst->chroma_cr_addr;
+
+ /*
+ * The third plane will be Cr.
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * B2R2_TTY_CR_NOT_CB.
+ */
+ dst_planes[2].chroma_selection = B2R2_TTY_CB_NOT_CR;
+ }
+
+ }
+
+ /* Configure one node for each plane */
+ for (i = 0; i < nbr_planes; i++) {
+
+ if (node == NULL) {
+ b2r2_log_warn("%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * When writing chroma, there's no need to read the luma and
+ * vice versa.
+ */
+ if ((node->node.GROUP3.B2R2_STY & B2R2_NATIVE_YUV) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ if (i != 1) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ }
+ if (i != 2) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER;
+ }
+ } else if ((node->node.GROUP3.B2R2_STY &
+ (B2R2_NATIVE_YCBCR42X_MBN |
+ B2R2_NATIVE_YCBCR42X_R2B)) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ }
+
+
+ set_target(node, dst_planes[i].addr, &dst_planes[i]);
+
+ node = node->next;
+ }
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * configure_blend() - configures the given node for alpha blending
+ *
+ * @node - the node to configure
+ * @flags - the flags passed in the blt_request
+ * @global_alpha - the global alpha to use (if enabled in flags)
+ * @swap_fg_bg - if true, fg will be on s1 instead of s2
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_blend(struct b2r2_node *node, u32 flags, u32 global_alpha,
+ bool swap_fg_bg)
+{
+ node->node.GROUP0.B2R2_ACK &= ~(B2R2_ACK_MODE_BYPASS_S2_S3);
+
+ /* Check if the foreground is premultiplied */
+ if ((flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT) != 0)
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_PREMULT;
+
+
+ /* Check if global alpha blend should be enabled */
+ if ((flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) != 0) {
+
+ /* B2R2 expects the global alpha to be in 0...128 range */
+ global_alpha = (global_alpha*128)/255;
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << B2R2_ACK_GALPHA_ROPID_SHIFT;
+ } else {
+ node->node.GROUP0.B2R2_ACK |=
+ (128 << B2R2_ACK_GALPHA_ROPID_SHIFT);
+ }
+
+ /* Copy the destination config to the appropriate source and clear any
+ clashing flags */
+ if (swap_fg_bg) {
+ /* S1 will be foreground, S2 background */
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_SWAP_FG_BG;
+
+ node->node.GROUP4.B2R2_SBA = node->node.GROUP1.B2R2_TBA;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP1.B2R2_TTY;
+ node->node.GROUP4.B2R2_SXY = node->node.GROUP1.B2R2_TXY;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP1.B2R2_TSZ;
+
+ node->node.GROUP4.B2R2_STY &= ~(B2R2_S2TY_A1_SUBST_KEY_MODE |
+ B2R2_S2TY_CHROMA_LEFT_EXT_AVERAGE);
+ } else {
+ /* S1 will be background, S2 foreground */
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ node->node.GROUP3.B2R2_SBA = node->node.GROUP1.B2R2_TBA;
+ node->node.GROUP3.B2R2_STY |= node->node.GROUP1.B2R2_TTY;
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP1.B2R2_TXY;
+
+ node->node.GROUP3.B2R2_STY &= ~(B2R2_S1TY_A1_SUBST_KEY_MODE);
+
+ }
+}
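+
+/*
+ * Worked example (illustrative values only): a caller passing the usual
+ * 8-bit global alpha of 255 ends up with (255*128)/255 = 128 in the ACK
+ * register, i.e. fully opaque in B2R2's 0..128 range, while a global
+ * alpha of 128 maps to (128*128)/255 = 64, roughly half transparency.
+ */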
+
+/**
+ * configure_clip() - configures destination clipping for the given node
+ *
+ * @node - the node to configure
+ * @clip_rect - the clip rectangle
+ *
+ * This operation does not consume any nodes.
+ */
+static void configure_clip(struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect)
+{
+ s32 l = clip_rect->x;
+ s32 r = clip_rect->x + clip_rect->width - 1;
+ s32 t = clip_rect->y;
+ s32 b = clip_rect->y + clip_rect->height - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+
+ /* Clip window setup */
+ node->node.GROUP6.B2R2_CWO =
+ ((t & 0x7FFF) << B2R2_CWO_Y_SHIFT) |
+ ((l & 0x7FFF) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((b & 0x7FFF) << B2R2_CWO_Y_SHIFT) |
+ ((r & 0x7FFF) << B2R2_CWO_X_SHIFT);
+}
+
+/**
+ * set_buf() - configures the given buffer with the provided values
+ *
+ * @addr - the physical base address
+ * @img - the blt image to base the buffer on
+ * @rect - the rectangle to use
+ * @color_fill - determines whether the buffer should be used for color fill
+ * @color - the color to use in case of color fill
+ */
+static void set_buf(struct b2r2_node_split_buf *buf, u32 addr,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect, bool color_fill, u32 color)
+{
+ memset(buf, 0, sizeof(*buf));
+
+ buf->fmt = img->fmt;
+ buf->type = get_fmt_type(img->fmt);
+
+ if (color_fill) {
+ buf->type = B2R2_FMT_TYPE_RASTER;
+ buf->color = color;
+ } else {
+ buf->addr = addr;
+
+ buf->alpha_range = get_alpha_range(img->fmt);
+
+ if (img->pitch == 0)
+ buf->pitch = fmt_byte_pitch(img->fmt, img->width);
+ else
+ buf->pitch = img->pitch;
+
+ buf->height = img->height;
+ buf->width = img->width;
+
+ switch (buf->type) {
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ if (is_yuv422_fmt(buf->fmt) ||
+ is_yuv420_fmt(buf->fmt)) {
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ } else {
+ buf->chroma_cr_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ }
+ if (is_yuv420_fmt(buf->fmt)) {
+ /*
+ * Use ceil(height/2) in case
+ * buffer height is not divisible by 2.
+ */
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (is_yuv422_fmt(buf->fmt)) {
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) * buf->height);
+ } else if (is_yvu420_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (is_yvu422_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) * buf->height);
+ }
+ break;
+ default:
+ break;
+ }
+
+ memcpy(&buf->rect, rect, sizeof(buf->rect));
+ }
+}
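+
+/*
+ * Worked example (illustrative values only): for a 100x60 YUV420 planar
+ * buffer at physical address A with pitch 100, set_buf() places the Cb
+ * plane at A + 100*60 = A + 6000 and the Cr plane at
+ * (A + 6000) + (100/2)*((60+1)/2) = A + 7500, matching the half-pitch,
+ * half-height chroma layout assumed by configure_src()/configure_dst().
+ */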
+
+/**
+ * setup_tmp_buf() - configure a temporary buffer
+ */
+static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
+ enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height)
+{
+ int ret;
+
+ enum b2r2_blt_fmt fmt;
+
+ u32 width;
+ u32 height;
+ u32 pitch;
+ u32 size;
+
+ /* Determine what format we should use for the tmp buf */
+ if (is_rgb_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (is_bgr_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ } else if (is_yvu_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_CB_Y_CR_Y;
+ } else if (is_yuv_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn("%s: "
+ "Cannot create tmp buf from this fmt (%d)\n", __func__,
+ pref_fmt);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* See if we can fit the entire preferred rectangle */
+ width = pref_width;
+ height = pref_height;
+ pitch = fmt_byte_pitch(fmt, width);
+ size = pitch * height;
+
+ if (size > max_size) {
+ /* We need to limit the size, so we choose a different width */
+ width = MIN(width, B2R2_RESCALE_MAX_WIDTH);
+ pitch = fmt_byte_pitch(fmt, width);
+ height = MIN(height, max_size / pitch);
+ size = pitch * height;
+ }
+
+ /* We should at least have enough room for one scanline */
+ if (height == 0) {
+ b2r2_log_warn("%s: Not enough tmp mem!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+
+ tmp->fmt = fmt;
+ tmp->type = B2R2_FMT_TYPE_RASTER;
+ tmp->height = height;
+ tmp->width = width;
+ tmp->pitch = pitch;
+
+ tmp->rect.width = width;
+ tmp->rect.height = tmp->height;
+ tmp->alpha_range = B2R2_TY_ALPHA_RANGE_255;
+
+ return 0;
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
+ */
+static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+ }
+}
+
+/**
+ * get_alpha() - returns the pixel alpha in 0...255 range
+ */
+static u8 get_alpha(enum b2r2_blt_fmt fmt, u32 pixel)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return (pixel >> 24) & 0xff;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return pixel & 0xff;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return (pixel >> 16) & 0xff; /* Alpha occupies bits 23..16 */
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return (((pixel >> 12) & 0xf) * 255) / 15;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return (pixel >> 15) * 255;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return pixel * 255;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return pixel;
+ default:
+ return 255;
+ }
+}
+
+/**
+ * set_alpha() - returns a color value with the alpha component set
+ */
+static u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color)
+{
+ u32 alpha_mask;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ color &= 0x00ffffff;
+ alpha_mask = alpha << 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ color &= 0xffffff00;
+ alpha_mask = alpha;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ color &= 0x00ffff;
+ alpha_mask = alpha << 16;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ color &= 0x0fff;
+ alpha_mask = (alpha << 8) & 0xF000;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ color &= 0x7fff;
+ alpha_mask = (alpha / 255) << 15;
+ break;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ color = 0;
+ alpha_mask = (alpha / 255);
+ break;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ color = 0;
+ alpha_mask = alpha;
+ break;
+ default:
+ alpha_mask = 0;
+ }
+
+ return color | alpha_mask;
+}
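+
+/*
+ * Worked example (illustrative values only): for ARGB4444 the two helpers
+ * round-trip an alpha nibble through the full 8-bit range:
+ * get_alpha(ARGB4444, 0xF123) = (0xF * 255) / 15 = 255, and
+ * set_alpha(ARGB4444, 255, 0x0123) = 0x0123 | ((255 << 8) & 0xF000) = 0xF123.
+ */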
+
+/**
+ * fmt_has_alpha() - returns whether the given format carries an alpha value
+ */
+static bool fmt_has_alpha(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_rgb_fmt() - returns whether the given format is a rgb format
+ */
+static bool is_rgb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_bgr_fmt() - returns whether the given format is a bgr format
+ */
+static bool is_bgr_fmt(enum b2r2_blt_fmt fmt)
+{
+ return (fmt == B2R2_BLT_FMT_32_BIT_ABGR8888);
+}
+
+/**
+ * is_yuv_fmt() - returns whether the given format is a yuv format
+ */
+static bool is_yuv_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yvu_fmt() - returns whether the given format is a yvu format
+ */
+static bool is_yvu_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yuv420_fmt() - returns whether the given format is a yuv420 format
+ */
+static bool is_yuv420_fmt(enum b2r2_blt_fmt fmt)
+{
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_yuv422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yvu420_fmt() - returns whether the given format is a yvu420 format
+ */
+static bool is_yvu420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_yvu422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+/**
+ * is_yuv444_fmt() - returns whether the given format is a yuv444 format
+ */
+static bool is_yuv444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * get_fmt_byte_pitch() - returns the pitch of a pixmap with the given width
+ */
+static int fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width)
+{
+ int pitch;
+
+ switch (fmt) {
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ pitch = width >> 3; /* Shift is faster than division */
+ if ((width & 0x7) != 0) /* Check for remainder */
+ pitch++;
+ return pitch;
+
+ case B2R2_BLT_FMT_8_BIT_A8: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return width << 1;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return width << 2;
+
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ return 0;
+ }
+}
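+
+/*
+ * Worked example (illustrative values only): a 100 pixel wide surface
+ * gives a pitch of 13 bytes for A1 (100 >> 3 = 12, plus one byte for the
+ * 4 remaining bits), 100 bytes for A8 and the planar/semi-planar YUV
+ * luma, 200 bytes for RGB565, 300 bytes for RGB888 and 400 bytes for
+ * ARGB8888.
+ */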
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt)
+{
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* Not actually supported by HW */
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* Not actually supported by HW */
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
+ */
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
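+
+/*
+ * Worked example (illustrative values only): pure green in RGB565 is
+ * 0x07E0. The 6-bit green field lands in bits 15..10 (0x00FC00) and its
+ * two MSBs are replicated into bits 9..8, giving 0x00FF00, i.e. full
+ * green in RGB888. The same MSB replication maps a 5-bit 0x1F to 0xFF.
+ */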
+
+/**
+ * get_fmt_type() - returns the type of the given format (raster, planar, etc.)
+ */
+static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_FMT_TYPE_RASTER;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_FMT_TYPE_PLANAR;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_FMT_TYPE_SEMI_PLANAR;
+ default:
+ return B2R2_FMT_TYPE_RASTER;
+ }
+}
+
+/**
+ * is_transform() - returns whether the given request is a transform operation
+ */
+static bool is_transform(const struct b2r2_blt_request *req)
+{
+ return (req->user_req.transform != B2R2_BLT_TRANSFORM_NONE) ||
+ (req->user_req.src_rect.width !=
+ req->user_req.dst_rect.width) ||
+ (req->user_req.src_rect.height !=
+ req->user_req.dst_rect.height);
+}
+
+/**
+ * calculate_scale_factor() - calculates the scale factor between the given
+ * values
+ */
+static int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
+{
+ int ret;
+ u32 sf;
+
+ b2r2_log_info("%s\n", __func__);
+
+ if (to == from) {
+ *sf_out = 1 << 10;
+ return 0;
+ } else if (to == 0) {
+ b2r2_log_err("%s: To is 0!\n", __func__);
+ BUG_ON(1);
+ }
+
+ sf = (from << 10) / to;
+
+ if ((sf & 0xffff0000) != 0) {
+ /* Overflow error */
+ b2r2_log_warn("%s: "
+ "Scale factor too large\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ } else if (sf == 0) {
+ b2r2_log_warn("%s: "
+ "Scale factor too small\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ *sf_out = (u16)sf;
+
+ b2r2_log_info("%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn("%s: Exit...\n", __func__);
+ return ret;
+}
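+
+/*
+ * Worked example (illustrative values only): downscaling a 200 pixel wide
+ * source to 100 pixels gives sf = (200 << 10) / 100 = 0x800, i.e. 2.0 in
+ * the 6.10 fixed point format used by the B2R2 resizer, while upscaling
+ * 100 to 200 gives 0x200 (0.5). A 1:1 blit returns exactly 1 << 10.
+ */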
+
+/**
+ * rescale() - rescales the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 rescale(s32 dim, u16 sf)
+{
+ b2r2_log_info("%s\n", __func__);
+
+ if (sf == 0) {
+ b2r2_log_err("%s: Scale factor is 0!\n", __func__);
+ BUG_ON(1);
+ }
+
+ /*
+ * This is normally not safe to do, since the left shift drastically
+ * reduces the usable range of the integer part of the dimension. But
+ * since the B2R2 hardware only has 12-bit registers for these values,
+ * we are safe.
+ */
+ return (dim << 20) / sf;
+}
+
+/**
+ * inv_rescale() - does an inverted rescale of the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 inv_rescale(s32 dim, u16 sf)
+{
+ if (sf == 0)
+ return dim;
+
+ return dim * sf;
+}
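+
+/*
+ * Worked example (illustrative values only): with sf = 0x800 (2.0), a
+ * 100 pixel destination dimension maps back to
+ * inv_rescale(100, 0x800) = 100 * 2048 = 204800, i.e. 200.0 source
+ * pixels in 22.10 fixed point, while rescale(200, 0x800) =
+ * (200 << 20) / 2048 = 102400, i.e. 100.0 destination pixels.
+ */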
+
+/**
+ * set_target() - sets the target registers of the given node
+ */
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ s32 l;
+ s32 r;
+ s32 t;
+ s32 b;
+
+ if (buf->tmp_buf_index)
+ node->dst_tmp_index = buf->tmp_buf_index;
+
+ node->node.GROUP1.B2R2_TBA = addr;
+ node->node.GROUP1.B2R2_TTY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->chroma_selection | buf->hso |
+ buf->vso | buf->dither | buf->plane_selection;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP1.B2R2_TTY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Check if the rectangle is outside the buffer */
+ if (buf->vso == B2R2_TY_VSO_BOTTOM_TO_TOP)
+ t = buf->win.y - (buf->win.height - 1);
+ else
+ t = buf->win.y;
+
+ if (buf->hso == B2R2_TY_HSO_RIGHT_TO_LEFT)
+ l = buf->win.x - (buf->win.width - 1);
+ else
+ l = buf->win.x;
+
+ r = l + buf->win.width;
+ b = t + buf->win.height;
+
+ /* Clip to the destination buffer to prevent memory overwrites */
+ if ((l < 0) || (r > buf->width) || (t < 0) || (b > buf->height)) {
+ /* The clip rectangle is including the borders */
+ l = MAX(l, 0);
+ r = MIN(r, buf->width) - 1;
+ t = MAX(t, 0);
+ b = MIN(b, buf->height) - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP6.B2R2_CWO =
+ ((l & 0x7FFF) << B2R2_CWS_X_SHIFT) |
+ ((t & 0x7FFF) << B2R2_CWS_Y_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((r & 0x7FFF) << B2R2_CWO_X_SHIFT) |
+ ((b & 0x7FFF) << B2R2_CWO_Y_SHIFT);
+ }
+
+}
+
+/**
+ * set_src() - configures the given source register with the given values
+ */
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ src->B2R2_SBA = addr;
+ src->B2R2_STY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ src->B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ src->B2R2_SSZ = ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ src->B2R2_SXY = ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+}
+
+/**
+ * set_src_1() - sets the source 1 registers of the given node
+ */
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ node->node.GROUP3.B2R2_SBA = addr;
+ node->node.GROUP3.B2R2_STY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP3.B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP3.B2R2_SXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Source 1 has no size register */
+}
+
+/**
+ * set_src_2() - sets the source 2 registers of the given node
+ */
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 2;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP4, addr, buf);
+}
+
+/**
+ * set_src_3() - sets the source 3 registers of the given node
+ */
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 3;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_3;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP5, addr, buf);
+}
+
+/**
+ * set_ivmx() - configures the iVMX registers with the given values
+ */
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+
+ node->node.GROUP15.B2R2_VMX0 = vmx_values[0];
+ node->node.GROUP15.B2R2_VMX1 = vmx_values[1];
+ node->node.GROUP15.B2R2_VMX2 = vmx_values[2];
+ node->node.GROUP15.B2R2_VMX3 = vmx_values[3];
+}
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_node *node)
+{
+ while (node != NULL) {
+ memset(&node->node, 0, sizeof(node->node));
+
+ node->src_tmp_index = 0;
+ node->dst_tmp_index = 0;
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7ffff;
+
+ if (node->next != NULL)
+ node->node.GROUP0.B2R2_NIP =
+ node->next->physical_address;
+ node = node->next;
+ }
+}
+
+int b2r2_node_split_init(void)
+{
+ b2r2_filters_init();
+
+ return 0;
+}
+
+void b2r2_node_split_exit(void)
+{
+ b2r2_filters_exit();
+}
diff --git a/drivers/video/b2r2/b2r2_node_split.h b/drivers/video/b2r2/b2r2_node_split.h
new file mode 100644
index 00000000000..5bceac28488
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_NODE_SPLIT_H_
+#define __B2R2_NODE_SPLIT_H_
+
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+
+/**
+ * b2r2_node_split_analyze() - Analyzes a B2R2 request
+ *
+ * @req - The request to analyze
+ * @max_buf_size - The largest size allowed for intermediate buffers
+ * @node_count - Number of nodes required for the job
+ * @buf_count - Number of intermediate buffers required for the job
+ * @bufs - An array of buffers needed for intermediate buffers
+ *
+ * Analyzes the request and determines how many nodes and intermediate buffers
+ * are required.
+ *
+ * It is the responsibility of the caller to allocate memory and assign the
+ * physical addresses. After that b2r2_node_split_assign_buffers should be
+ * called to assign the buffers to the right nodes.
+ *
+ * Returns:
+ * A handle identifying the analyzed request if successful, a negative
+ * value otherwise.
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+ u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+ u32 *buf_count, struct b2r2_node_split_job *job);
+
+/**
+ * b2r2_node_split_configure() - Performs a node split
+ *
+ * @handle - A handle for the analyzed request
+ * @first - The first node in the list of nodes to use
+ *
+ * Fills the supplied list of nodes with the parameters acquired by analyzing
+ * the request.
+ *
+ * All pointers to intermediate buffers are represented by integers to be used
+ * in the array returned by b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_configure(struct b2r2_node_split_job *job,
+ struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_assign_buffers() - Assigns physical addresses
+ *
+ * @handle - The handle for the job
+ * @first - The first node in the node list
+ * @bufs - Buffers with assigned physical addresses
+ * @buf_count - Number of physical addresses
+ *
+ * Assigns the physical addresses where intermediate buffers are required in
+ * the node list.
+ *
+ * The order of the elements of 'bufs' must be maintained from the call to
+ * b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *job,
+ struct b2r2_node *first, struct b2r2_work_buf *bufs,
+ u32 buf_count);
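+
+/*
+ * Typical usage (sketch only; allocation and error handling details are
+ * assumptions made for illustration, not part of this API description):
+ *
+ *	struct b2r2_node_split_job job;
+ *	struct b2r2_work_buf *bufs;
+ *	u32 node_count, buf_count;
+ *
+ *	b2r2_node_split_analyze(req, max_buf_size, &node_count, &bufs,
+ *			&buf_count, &job);
+ *	(allocate node_count nodes and physical memory for the buf_count
+ *	 work buffers here)
+ *	b2r2_node_split_configure(&job, first_node);
+ *	b2r2_node_split_assign_buffers(&job, first_node, bufs, buf_count);
+ *	(run the job, then unassign the buffers or cancel the job)
+ */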
+
+/**
+ * b2r2_node_split_unassign_buffers() - Removes all physical addresses
+ *
+ * @handle - The handle associated with the job
+ * @first - The first node in the node list
+ *
+ * Removes all references to intermediate buffers from the node list.
+ *
+ * This makes it possible to reuse the node list with new buffers by calling
+ * b2r2_node_split_assign_buffers again. Useful for caching node lists.
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_node_split_job *job,
+ struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_cancel() - Releases all resources for a job
+ *
+ * @handle - The handle identifying the job. This will be set to 0.
+ *
+ * Releases all resources associated with a job.
+ *
+ * This should always be called once b2r2_node_split_analyze has been called
+ * in order to release any resources allocated while analyzing.
+ */
+void b2r2_node_split_cancel(struct b2r2_node_split_job *job);
+
+/**
+ * b2r2_node_split_init() - Initializes the node split module
+ *
+ * Initializes the node split module and creates debugfs files.
+ */
+int b2r2_node_split_init(void);
+
+/**
+ * b2r2_node_split_exit() - Deinitializes the node split module
+ *
+ * Releases all resources for the node split module.
+ */
+void b2r2_node_split_exit(void);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_profiler/Makefile b/drivers/video/b2r2/b2r2_profiler/Makefile
new file mode 100644
index 00000000000..69a85524fd7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/Makefile
@@ -0,0 +1,3 @@
+# Make file for loadable module B2R2 Profiler
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler.o
diff --git a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
new file mode 100644
index 00000000000..2e7c5ca5a7a
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson B2R2 profiler implementation
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <video/b2r2_blt.h>
+#include "../b2r2_profiler_api.h"
+
+
+#define S32_MAX 2147483647
+
+
+static int src_format_filter_on = false;
+module_param(src_format_filter_on, bool, S_IRUGO | S_IWUSR);
+static unsigned int src_format_filter;
+module_param(src_format_filter, uint, S_IRUGO | S_IWUSR);
+
+static int print_blts_on = 0;
+module_param(print_blts_on, bool, S_IRUGO | S_IWUSR);
+static int use_mpix_per_second_in_print_blts = 1;
+module_param(use_mpix_per_second_in_print_blts, bool, S_IRUGO | S_IWUSR);
+
+static int min_avg_max_mpix_per_second_on = 1;
+module_param(min_avg_max_mpix_per_second_on, bool, S_IRUGO | S_IWUSR);
+
+static const unsigned int min_avg_max_mpix_per_second_num_blts_used = 400;
+static struct {
+ unsigned long sampling_start_time_jiffies;
+
+ s32 min_mpix_per_second;
+ struct b2r2_blt_req min_blt_request;
+ struct b2r2_blt_profiling_info min_blt_profiling_info;
+
+ s32 max_mpix_per_second;
+ struct b2r2_blt_req max_blt_request;
+ struct b2r2_blt_profiling_info max_blt_profiling_info;
+
+ s32 accumulated_num_pixels;
+ s32 accumulated_num_usecs;
+
+ u32 num_blts_done;
+} min_avg_max_mpix_per_second_state;
+
+
+static s32 nsec_2_usec(const s32 nsec);
+
+static int is_scale_blt(const struct b2r2_blt_req * const request);
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void print_blt(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request);
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs);
+static void print_min_avg_max_mpix_per_second_state(void);
+static void reset_min_avg_max_mpix_per_second_state(void);
+static void do_min_avg_max_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static void blt_done(const struct b2r2_blt_req * const blt, const s32 request_id, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+
+static struct b2r2_profiler this = {
+ .blt_done = blt_done,
+};
+
+
+static s32 nsec_2_usec(const s32 nsec)
+{
+ return nsec / 1000;
+}
+
+
+static int is_scale_blt(const struct b2r2_blt_req * const request)
+{
+ if ((request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90 &&
+ (request->src_rect.width != request->dst_rect.height ||
+ request->src_rect.height != request->dst_rect.width)) ||
+ (!(request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) &&
+ (request->src_rect.width != request->dst_rect.width ||
+ request->src_rect.height != request->dst_rect.height)))
+ return 1;
+ else
+ return 0;
+}
+
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ return get_mpix_per_second(get_num_pixels_in_blt(request),
+ nsec_2_usec(blt_profiling_info->nsec_active_in_cpu + blt_profiling_info->nsec_active_in_b2r2));
+}
+
+static void print_blt(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ char tmp_str[128];
+ sprintf(tmp_str, "SF: %#10x, DF: %#10x, F: %#10x, T: %#3x, S: %1i, P: %7i",
+ request->src_img.fmt,
+ request->dst_img.fmt,
+ request->flags,
+ request->transform,
+ is_scale_blt(request),
+ get_num_pixels_in_blt(request));
+ if (use_mpix_per_second_in_print_blts)
+ printk(KERN_ALERT "%s, MPix/s: %3i\n",
+ tmp_str,
+ get_blt_mpix_per_second(request, blt_profiling_info));
+ else
+ printk(KERN_ALERT "%s, CPU: %10i, B2R2: %10i, Tot: %10i ns\n",
+ tmp_str,
+ blt_profiling_info->nsec_active_in_cpu,
+ blt_profiling_info->nsec_active_in_b2r2,
+ blt_profiling_info->total_time_nsec);
+}
+
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request)
+{
+ s32 num_pixels_in_src = request->src_rect.width * request->src_rect.height;
+ s32 num_pixels_in_dst = request->dst_rect.width * request->dst_rect.height;
+ if (request->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW))
+ return num_pixels_in_dst;
+ else
+ return (num_pixels_in_src + num_pixels_in_dst) / 2;
+}
+
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs)
+{
+ s32 num_pixels_scale_factor = num_pixels != 0 ? S32_MAX / num_pixels : S32_MAX;
+ s32 num_usecs_scale_factor = num_usecs != 0 ? S32_MAX / num_usecs : S32_MAX;
+ s32 scale_factor = min(num_pixels_scale_factor, num_usecs_scale_factor);
+
+ s32 num_pixels_scaled = num_pixels * scale_factor;
+ s32 num_usecs_scaled = num_usecs * scale_factor;
+
+ if (num_usecs_scaled < 1000000)
+ return 0;
+
+ return (num_pixels_scaled / 1000000) / (num_usecs_scaled / 1000000);
+}
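+
+/*
+ * Worked example (illustrative values only): an 800x600 blit
+ * (480000 pixels) that spent 10000 us in the CPU and B2R2 scales both
+ * values by min(S32_MAX/480000, S32_MAX/10000) = 4473 and then returns
+ * (2147040000 / 1000000) / (44730000 / 1000000) = 2147 / 44 = 48 MPix/s,
+ * which matches the exact rate of 480000 px / 10000 us = 48 px/us.
+ */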
+
+static void print_min_avg_max_mpix_per_second_state(void)
+{
+ printk(KERN_ALERT "Min: %3i, Avg: %3i, Max: %3i MPix/s\n",
+ min_avg_max_mpix_per_second_state.min_mpix_per_second,
+ get_mpix_per_second(min_avg_max_mpix_per_second_state.accumulated_num_pixels,
+ min_avg_max_mpix_per_second_state.accumulated_num_usecs),
+ min_avg_max_mpix_per_second_state.max_mpix_per_second);
+ printk(KERN_ALERT "Min blit:\n");
+ print_blt(&min_avg_max_mpix_per_second_state.min_blt_request,
+ &min_avg_max_mpix_per_second_state.min_blt_profiling_info);
+ printk(KERN_ALERT "Max blit:\n");
+ print_blt(&min_avg_max_mpix_per_second_state.max_blt_request,
+ &min_avg_max_mpix_per_second_state.max_blt_profiling_info);
+}
+
+static void reset_min_avg_max_mpix_per_second_state(void)
+{
+ min_avg_max_mpix_per_second_state.sampling_start_time_jiffies =
+ jiffies;
+ min_avg_max_mpix_per_second_state.min_mpix_per_second = S32_MAX;
+ min_avg_max_mpix_per_second_state.max_mpix_per_second = 0;
+ min_avg_max_mpix_per_second_state.accumulated_num_pixels = 0;
+ min_avg_max_mpix_per_second_state.accumulated_num_usecs = 0;
+ min_avg_max_mpix_per_second_state.num_blts_done = 0;
+}
+
+static void do_min_avg_max_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ s32 num_pixels_in_blt;
+ s32 num_usec_blt_took;
+ s32 blt_mpix_per_second;
+
+ if (time_before(jiffies, min_avg_max_mpix_per_second_state.sampling_start_time_jiffies))
+ return;
+
+ num_pixels_in_blt = get_num_pixels_in_blt(request);
+ num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu + blt_profiling_info->nsec_active_in_b2r2);
+ blt_mpix_per_second = get_mpix_per_second(num_pixels_in_blt,
+ num_usec_blt_took);
+
+ if (blt_mpix_per_second <= min_avg_max_mpix_per_second_state.min_mpix_per_second) {
+ min_avg_max_mpix_per_second_state.min_mpix_per_second =
+ blt_mpix_per_second;
+ memcpy(&min_avg_max_mpix_per_second_state.min_blt_request,
+ request, sizeof(struct b2r2_blt_req));
+ memcpy(&min_avg_max_mpix_per_second_state.min_blt_profiling_info,
+ blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ if (blt_mpix_per_second >= min_avg_max_mpix_per_second_state.max_mpix_per_second) {
+ min_avg_max_mpix_per_second_state.max_mpix_per_second =
+ blt_mpix_per_second;
+ memcpy(&min_avg_max_mpix_per_second_state.max_blt_request,
+ request, sizeof(struct b2r2_blt_req));
+ memcpy(&min_avg_max_mpix_per_second_state.max_blt_profiling_info,
+ blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ min_avg_max_mpix_per_second_state.accumulated_num_pixels +=
+ num_pixels_in_blt;
+ min_avg_max_mpix_per_second_state.accumulated_num_usecs +=
+ num_usec_blt_took;
+
+ min_avg_max_mpix_per_second_state.num_blts_done++;
+
+ if (min_avg_max_mpix_per_second_state.num_blts_done >= min_avg_max_mpix_per_second_num_blts_used) {
+ print_min_avg_max_mpix_per_second_state();
+ reset_min_avg_max_mpix_per_second_state();
+ /*
+ * The printouts initiated above can disturb the next measurement,
+ * so we delay it two seconds to give the printouts a chance to
+ * finish.
+ */
+ min_avg_max_mpix_per_second_state.sampling_start_time_jiffies =
+ jiffies + (2 * HZ);
+ }
+}
+
+static void blt_done(const struct b2r2_blt_req * const request, const s32 request_id, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ /* Filters */
+ if (src_format_filter_on && request->src_img.fmt != src_format_filter)
+ return;
+
+ /* Processors */
+ if (print_blts_on)
+ print_blt(request, blt_profiling_info);
+
+ if (min_avg_max_mpix_per_second_on)
+ do_min_avg_max_mpix_per_second(request, blt_profiling_info);
+}
+
+
+static int __init b2r2_profiler_init(void)
+{
+ reset_min_avg_max_mpix_per_second_state();
+
+ return b2r2_register_profiler(&this);
+}
+module_init(b2r2_profiler_init);
+
+static void __exit b2r2_profiler_exit(void)
+{
+ b2r2_unregister_profiler(&this);
+}
+module_exit(b2r2_profiler_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Johan Mossberg (johan.xx.mossberg@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Profiler");
diff --git a/drivers/video/b2r2/b2r2_profiler_api.h b/drivers/video/b2r2/b2r2_profiler_api.h
new file mode 100644
index 00000000000..5f1f9abbe49
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_api.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiling API
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_API_H
+#define _LINUX_VIDEO_B2R2_PROFILER_API_H
+
+#include <video/b2r2_blt.h>
+
+/**
+ * struct b2r2_blt_profiling_info - Profiling information for a blit
+ *
+ * @nsec_active_in_cpu: The number of nanoseconds the job was active on the CPU.
+ * This is an approximate value, see the code for more
+ * info.
+ * @nsec_active_in_b2r2: The number of nanoseconds the job was active in B2R2. This
+ * is an approximate value, see the code for more info.
+ * @total_time_nsec: The total time the job took, in nanoseconds. Includes idle time.
+ */
+struct b2r2_blt_profiling_info {
+ s32 nsec_active_in_cpu;
+ s32 nsec_active_in_b2r2;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_profiler - B2R2 profiler.
+ *
+ * The callbacks are never run concurrently. No heavy work should be done in the
+ * callbacks as this might adversely affect the B2R2 driver. The callbacks must
+ * not call the B2R2 profiler API as this would cause a deadlock. If the callbacks
+ * call into the B2R2 driver, care must be taken as deadlock situations can arise.
+ *
+ * @blt_done: Called when a blit has finished, timed out or been canceled.
+ */
+struct b2r2_profiler {
+ void (*blt_done)(const struct b2r2_blt_req * const request, const s32 request_id, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+};
+
+/**
+ * b2r2_register_profiler() - Registers a profiler.
+ *
+ * Currently only one profiler can be registered at any given time.
+ *
+ * @profiler: The profiler
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler);
+
+/**
+ * b2r2_unregister_profiler() - Unregisters a profiler.
+ *
+ * @profiler: The profiler
+ */
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_API_H */
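[For orientation only; this block is editorial and not part of the patch. A minimal out-of-tree profiler built against this header could look like the sketch below. The module and callback names are hypothetical; only struct b2r2_profiler, b2r2_register_profiler() and b2r2_unregister_profiler() come from the API above.]

#include <linux/module.h>
#include <video/b2r2_blt.h>
#include "b2r2_profiler_api.h"

/* Hypothetical callback: log how long each blit was active in B2R2. */
static void example_blt_done(const struct b2r2_blt_req * const request,
		const s32 request_id,
		const struct b2r2_blt_profiling_info * const info)
{
	pr_debug("blit %d done, active %d ns in B2R2\n",
		 request_id, info->nsec_active_in_b2r2);
}

static const struct b2r2_profiler example_profiler = {
	.blt_done = example_blt_done,
};

static int __init example_profiler_init(void)
{
	/* Only one profiler can be registered at a time. */
	return b2r2_register_profiler(&example_profiler);
}
module_init(example_profiler_init);

static void __exit example_profiler_exit(void)
{
	b2r2_unregister_profiler(&example_profiler);
}
module_exit(example_profiler_exit);

MODULE_LICENSE("GPL");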
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.c b/drivers/video/b2r2/b2r2_profiler_socket.c
new file mode 100644
index 00000000000..f96ab5be76e
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <asm/errno.h>
+
+#include "b2r2_profiler_api.h"
+#include "b2r2_internal.h"
+
+
+/*
+ * TODO: Call the profiler in a separate thread and have a circular buffer
+ * between the B2R2 driver and that thread. That way the profiler cannot slow
+ * down or kill the B2R2 driver. This seems a bit overkill right now, as there
+ * is only one B2R2 profiler and we have full control over it, but the
+ * situation may be different in the future.
+ */
+
+
+static const struct b2r2_profiler *b2r2_profiler;
+static DEFINE_SEMAPHORE(b2r2_profiler_lock);
+
+
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler)
+{
+ int return_value;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0)
+ return return_value;
+
+ if (b2r2_profiler != NULL) {
+ return_value = -EUSERS;
+
+ goto cleanup;
+ }
+
+ b2r2_profiler = profiler;
+
+ return_value = 0;
+
+cleanup:
+ up(&b2r2_profiler_lock);
+
+ return return_value;
+}
+EXPORT_SYMBOL(b2r2_register_profiler);
+
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler)
+{
+ down(&b2r2_profiler_lock);
+
+ if (profiler == b2r2_profiler)
+ b2r2_profiler = NULL;
+
+ up(&b2r2_profiler_lock);
+}
+EXPORT_SYMBOL(b2r2_unregister_profiler);
+
+
+bool is_profiler_registered_approx(void)
+{
+ /* No locking by design, to make it fast, hence the approx */
+ if (b2r2_profiler != NULL)
+ return true;
+ else
+ return false;
+}
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request)
+{
+ int return_value;
+ struct b2r2_blt_profiling_info blt_profiling_info;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0) {
+ dev_err(b2r2_blt_device(),
+ "%s: Failed to acquire semaphore, ret=%i. Lost profiler call!\n",
+ __func__, return_value);
+
+ return;
+ }
+
+ if (NULL == b2r2_profiler)
+ goto cleanup;
+
+ blt_profiling_info.nsec_active_in_cpu = request->nsec_active_in_cpu;
+ blt_profiling_info.nsec_active_in_b2r2 = request->job.nsec_active_in_hw;
+ blt_profiling_info.total_time_nsec = request->total_time_nsec;
+
+ b2r2_profiler->blt_done(&request->user_req, request->request_id, &blt_profiling_info);
+
+cleanup:
+ up(&b2r2_profiler_lock);
+}
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.h b/drivers/video/b2r2/b2r2_profiler_socket.h
new file mode 100644
index 00000000000..80b2c20293f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+#define _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+
+#include "b2r2_internal.h"
+
+/* Usually gives a correct result but can occasionally be wrong (no locking) */
+bool is_profiler_registered_approx(void);
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H */
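[Editorial sketch, not part of the patch: the unlocked check above appears to exist so the driver can skip the locked call cheaply in the common case where no profiler is registered. The expected call pattern in the job-completion path would then be roughly:]

	if (is_profiler_registered_approx())
		b2r2_call_profiler_blt_done(request);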
diff --git a/drivers/video/b2r2/b2r2_structures.h b/drivers/video/b2r2/b2r2_structures.h
new file mode 100644
index 00000000000..99fa7f047d3
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_structures.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 register struct
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef __B2R2_STRUCTURES_H
+#define __B2R2_STRUCTURES_H
+
+/* C struct view */
+struct b2r2_memory_map {
+ unsigned char fill0[2304];
+ unsigned int BLT_SSBA17; /* @2304 */
+ unsigned int BLT_SSBA18; /* @2308 */
+ unsigned int BLT_SSBA19; /* @2312 */
+ unsigned int BLT_SSBA20; /* @2316 */
+ unsigned int BLT_SSBA21; /* @2320 */
+ unsigned int BLT_SSBA22; /* @2324 */
+ unsigned int BLT_SSBA23; /* @2328 */
+ unsigned int BLT_SSBA24; /* @2332 */
+ unsigned char fill1[32];
+ unsigned int BLT_STBA5; /* @2368 */
+ unsigned int BLT_STBA6; /* @2372 */
+ unsigned int BLT_STBA7; /* @2376 */
+ unsigned int BLT_STBA8; /* @2380 */
+ unsigned char fill2[176];
+ unsigned int BLT_CTL; /* @2560 */
+ unsigned int BLT_ITS; /* @2564 */
+ unsigned int BLT_STA1; /* @2568 */
+ unsigned char fill3[4];
+ unsigned int BLT_SSBA1; /* @2576 */
+ unsigned int BLT_SSBA2; /* @2580 */
+ unsigned int BLT_SSBA3; /* @2584 */
+ unsigned int BLT_SSBA4; /* @2588 */
+ unsigned int BLT_SSBA5; /* @2592 */
+ unsigned int BLT_SSBA6; /* @2596 */
+ unsigned int BLT_SSBA7; /* @2600 */
+ unsigned int BLT_SSBA8; /* @2604 */
+ unsigned int BLT_STBA1; /* @2608 */
+ unsigned int BLT_STBA2; /* @2612 */
+ unsigned int BLT_STBA3; /* @2616 */
+ unsigned int BLT_STBA4; /* @2620 */
+ unsigned int BLT_CQ1_TRIG_IP; /* @2624 */
+ unsigned int BLT_CQ1_TRIG_CTL; /* @2628 */
+ unsigned int BLT_CQ1_PACE_CTL; /* @2632 */
+ unsigned int BLT_CQ1_IP; /* @2636 */
+ unsigned int BLT_CQ2_TRIG_IP; /* @2640 */
+ unsigned int BLT_CQ2_TRIG_CTL; /* @2644 */
+ unsigned int BLT_CQ2_PACE_CTL; /* @2648 */
+ unsigned int BLT_CQ2_IP; /* @2652 */
+ unsigned int BLT_AQ1_CTL; /* @2656 */
+ unsigned int BLT_AQ1_IP; /* @2660 */
+ unsigned int BLT_AQ1_LNA; /* @2664 */
+ unsigned int BLT_AQ1_STA; /* @2668 */
+ unsigned int BLT_AQ2_CTL; /* @2672 */
+ unsigned int BLT_AQ2_IP; /* @2676 */
+ unsigned int BLT_AQ2_LNA; /* @2680 */
+ unsigned int BLT_AQ2_STA; /* @2684 */
+ unsigned int BLT_AQ3_CTL; /* @2688 */
+ unsigned int BLT_AQ3_IP; /* @2692 */
+ unsigned int BLT_AQ3_LNA; /* @2696 */
+ unsigned int BLT_AQ3_STA; /* @2700 */
+ unsigned int BLT_AQ4_CTL; /* @2704 */
+ unsigned int BLT_AQ4_IP; /* @2708 */
+ unsigned int BLT_AQ4_LNA; /* @2712 */
+ unsigned int BLT_AQ4_STA; /* @2716 */
+ unsigned int BLT_SSBA9; /* @2720 */
+ unsigned int BLT_SSBA10; /* @2724 */
+ unsigned int BLT_SSBA11; /* @2728 */
+ unsigned int BLT_SSBA12; /* @2732 */
+ unsigned int BLT_SSBA13; /* @2736 */
+ unsigned int BLT_SSBA14; /* @2740 */
+ unsigned int BLT_SSBA15; /* @2744 */
+ unsigned int BLT_SSBA16; /* @2748 */
+ unsigned int BLT_SGA1; /* @2752 */
+ unsigned int BLT_SGA2; /* @2756 */
+ unsigned char fill4[8];
+ unsigned int BLT_ITM0; /* @2768 */
+ unsigned int BLT_ITM1; /* @2772 */
+ unsigned int BLT_ITM2; /* @2776 */
+ unsigned int BLT_ITM3; /* @2780 */
+ unsigned char fill5[16];
+ unsigned int BLT_DFV2; /* @2800 */
+ unsigned int BLT_DFV1; /* @2804 */
+ unsigned int BLT_PRI; /* @2808 */
+ unsigned char fill6[8];
+ unsigned int PLUGS1_OP2; /* @2820 */
+ unsigned int PLUGS1_CHZ; /* @2824 */
+ unsigned int PLUGS1_MSZ; /* @2828 */
+ unsigned int PLUGS1_PGZ; /* @2832 */
+ unsigned char fill7[16];
+ unsigned int PLUGS2_OP2; /* @2852 */
+ unsigned int PLUGS2_CHZ; /* @2856 */
+ unsigned int PLUGS2_MSZ; /* @2860 */
+ unsigned int PLUGS2_PGZ; /* @2864 */
+ unsigned char fill8[16];
+ unsigned int PLUGS3_OP2; /* @2884 */
+ unsigned int PLUGS3_CHZ; /* @2888 */
+ unsigned int PLUGS3_MSZ; /* @2892 */
+ unsigned int PLUGS3_PGZ; /* @2896 */
+ unsigned char fill9[48];
+ unsigned int PLUGT_OP2; /* @2948 */
+ unsigned int PLUGT_CHZ; /* @2952 */
+ unsigned int PLUGT_MSZ; /* @2956 */
+ unsigned int PLUGT_PGZ; /* @2960 */
+ unsigned char fill10[108];
+ unsigned int BLT_NIP; /* @3072 */
+ unsigned int BLT_CIC; /* @3076 */
+ unsigned int BLT_INS; /* @3080 */
+ unsigned int BLT_ACK; /* @3084 */
+ unsigned int BLT_TBA; /* @3088 */
+ unsigned int BLT_TTY; /* @3092 */
+ unsigned int BLT_TXY; /* @3096 */
+ unsigned int BLT_TSZ; /* @3100 */
+ unsigned int BLT_S1CF; /* @3104 */
+ unsigned int BLT_S2CF; /* @3108 */
+ unsigned int BLT_S1BA; /* @3112 */
+ unsigned int BLT_S1TY; /* @3116 */
+ unsigned int BLT_S1XY; /* @3120 */
+ unsigned char fill11[4];
+ unsigned int BLT_S2BA; /* @3128 */
+ unsigned int BLT_S2TY; /* @3132 */
+ unsigned int BLT_S2XY; /* @3136 */
+ unsigned int BLT_S2SZ; /* @3140 */
+ unsigned int BLT_S3BA; /* @3144 */
+ unsigned int BLT_S3TY; /* @3148 */
+ unsigned int BLT_S3XY; /* @3152 */
+ unsigned int BLT_S3SZ; /* @3156 */
+ unsigned int BLT_CWO; /* @3160 */
+ unsigned int BLT_CWS; /* @3164 */
+ unsigned int BLT_CCO; /* @3168 */
+ unsigned int BLT_CML; /* @3172 */
+ unsigned int BLT_FCTL; /* @3176 */
+ unsigned int BLT_PMK; /* @3180 */
+ unsigned int BLT_RSF; /* @3184 */
+ unsigned int BLT_RZI; /* @3188 */
+ unsigned int BLT_HFP; /* @3192 */
+ unsigned int BLT_VFP; /* @3196 */
+ unsigned int BLT_Y_RSF; /* @3200 */
+ unsigned int BLT_Y_RZI; /* @3204 */
+ unsigned int BLT_Y_HFP; /* @3208 */
+ unsigned int BLT_Y_VFP; /* @3212 */
+ unsigned char fill12[16];
+ unsigned int BLT_KEY1; /* @3232 */
+ unsigned int BLT_KEY2; /* @3236 */
+ unsigned char fill13[8];
+ unsigned int BLT_SAR; /* @3248 */
+ unsigned int BLT_USR; /* @3252 */
+ unsigned char fill14[8];
+ unsigned int BLT_IVMX0; /* @3264 */
+ unsigned int BLT_IVMX1; /* @3268 */
+ unsigned int BLT_IVMX2; /* @3272 */
+ unsigned int BLT_IVMX3; /* @3276 */
+ unsigned int BLT_OVMX0; /* @3280 */
+ unsigned int BLT_OVMX1; /* @3284 */
+ unsigned int BLT_OVMX2; /* @3288 */
+ unsigned int BLT_OVMX3; /* @3292 */
+ unsigned char fill15[8];
+ unsigned int BLT_VC1R; /* @3304 */
+ unsigned char fill16[20];
+ unsigned int BLT_Y_HFC0; /* @3328 */
+ unsigned int BLT_Y_HFC1; /* @3332 */
+ unsigned int BLT_Y_HFC2; /* @3336 */
+ unsigned int BLT_Y_HFC3; /* @3340 */
+ unsigned int BLT_Y_HFC4; /* @3344 */
+ unsigned int BLT_Y_HFC5; /* @3348 */
+ unsigned int BLT_Y_HFC6; /* @3352 */
+ unsigned int BLT_Y_HFC7; /* @3356 */
+ unsigned int BLT_Y_HFC8; /* @3360 */
+ unsigned int BLT_Y_HFC9; /* @3364 */
+ unsigned int BLT_Y_HFC10; /* @3368 */
+ unsigned int BLT_Y_HFC11; /* @3372 */
+ unsigned int BLT_Y_HFC12; /* @3376 */
+ unsigned int BLT_Y_HFC13; /* @3380 */
+ unsigned int BLT_Y_HFC14; /* @3384 */
+ unsigned int BLT_Y_HFC15; /* @3388 */
+ unsigned char fill17[80];
+ unsigned int BLT_Y_VFC0; /* @3472 */
+ unsigned int BLT_Y_VFC1; /* @3476 */
+ unsigned int BLT_Y_VFC2; /* @3480 */
+ unsigned int BLT_Y_VFC3; /* @3484 */
+ unsigned int BLT_Y_VFC4; /* @3488 */
+ unsigned int BLT_Y_VFC5; /* @3492 */
+ unsigned int BLT_Y_VFC6; /* @3496 */
+ unsigned int BLT_Y_VFC7; /* @3500 */
+ unsigned int BLT_Y_VFC8; /* @3504 */
+ unsigned int BLT_Y_VFC9; /* @3508 */
+ unsigned char fill18[72];
+ unsigned int BLT_HFC0; /* @3584 */
+ unsigned int BLT_HFC1; /* @3588 */
+ unsigned int BLT_HFC2; /* @3592 */
+ unsigned int BLT_HFC3; /* @3596 */
+ unsigned int BLT_HFC4; /* @3600 */
+ unsigned int BLT_HFC5; /* @3604 */
+ unsigned int BLT_HFC6; /* @3608 */
+ unsigned int BLT_HFC7; /* @3612 */
+ unsigned int BLT_HFC8; /* @3616 */
+ unsigned int BLT_HFC9; /* @3620 */
+ unsigned int BLT_HFC10; /* @3624 */
+ unsigned int BLT_HFC11; /* @3628 */
+ unsigned int BLT_HFC12; /* @3632 */
+ unsigned int BLT_HFC13; /* @3636 */
+ unsigned int BLT_HFC14; /* @3640 */
+ unsigned int BLT_HFC15; /* @3644 */
+ unsigned char fill19[80];
+ unsigned int BLT_VFC0; /* @3728 */
+ unsigned int BLT_VFC1; /* @3732 */
+ unsigned int BLT_VFC2; /* @3736 */
+ unsigned int BLT_VFC3; /* @3740 */
+ unsigned int BLT_VFC4; /* @3744 */
+ unsigned int BLT_VFC5; /* @3748 */
+ unsigned int BLT_VFC6; /* @3752 */
+ unsigned int BLT_VFC7; /* @3756 */
+ unsigned int BLT_VFC8; /* @3760 */
+ unsigned int BLT_VFC9; /* @3764 */
+};
+
+#endif /* !defined(__B2R2_STRUCTURES_H) */
+
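[Editorial aside, not part of the patch: the "@offset" comments in the struct above can be checked at compile time with offsetof() and BUILD_BUG_ON(), assuming <linux/bug.h> and <linux/stddef.h> are available. A minimal check might be:]

static inline void b2r2_check_reg_layout(void)
{
	/* Catch accidental padding or reordering at build time. */
	BUILD_BUG_ON(offsetof(struct b2r2_memory_map, BLT_CTL) != 2560);
	BUILD_BUG_ON(offsetof(struct b2r2_memory_map, BLT_NIP) != 3072);
}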
diff --git a/drivers/video/b2r2/b2r2_timing.c b/drivers/video/b2r2/b2r2_timing.c
new file mode 100644
index 00000000000..4f3e2b8b042
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/time.h>
+
+
+u32 b2r2_get_curr_nsec(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+
+ return (u32)timespec_to_ns(&ts);
+}
diff --git a/drivers/video/b2r2/b2r2_timing.h b/drivers/video/b2r2/b2r2_timing.h
new file mode 100644
index 00000000000..e87113c0ec9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+
+/**
+ * b2r2_get_curr_nsec() - Return the current time in nanoseconds. Note that the
+ * value wraps when the u32 limit is reached.
+ *
+ */
+u32 b2r2_get_curr_nsec(void);
+
+#endif /* _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ */
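[Usage note, editorial and not part of the patch: because the returned value wraps at the u32 limit, elapsed time is best computed with unsigned subtraction, which remains correct across a single wrap:]

	u32 start = b2r2_get_curr_nsec();
	/* ... the work being timed ... */
	u32 elapsed_nsec = b2r2_get_curr_nsec() - start;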
diff --git a/drivers/video/b2r2/b2r2_utils.c b/drivers/video/b2r2/b2r2_utils.c
new file mode 100644
index 00000000000..ad0e7a42fa1
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_utils.h"
+
+#include "b2r2_debug.h"
+
+#include <video/b2r2_blt.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+
+const s32 b2r2_s32_max = 2147483647;
+
+
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect)
+{
+ bounding_rect->x = 0;
+ bounding_rect->y = 0;
+ bounding_rect->width = img->width;
+ bounding_rect->height = img->height;
+}
+
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect)
+{
+ return rect->width == 0 || rect->height == 0;
+}
+
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->x >= rect2->x &&
+ rect1->y >= rect2->y &&
+ rect1->x + rect1->width <= rect2->x + rect2->width &&
+ rect1->y + rect1->height <= rect2->y + rect2->height;
+}
+
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2, struct b2r2_blt_rect *intersection)
+{
+ struct b2r2_blt_rect tmp_rect;
+
+ tmp_rect.x = max(rect1->x, rect2->x);
+ tmp_rect.y = max(rect1->y, rect2->y);
+ tmp_rect.width = min(rect1->x + rect1->width, rect2->x + rect2->width)
+ - tmp_rect.x;
+ if (tmp_rect.width < 0)
+ tmp_rect.width = 0;
+ tmp_rect.height =
+ min(rect1->y + rect1->height, rect2->y + rect2->height) -
+ tmp_rect.y;
+ if (tmp_rect.height < 0)
+ tmp_rect.height = 0;
+
+ *intersection = tmp_rect;
+}
+
+
+int b2r2_get_fmt_bpp(enum b2r2_blt_fmt fmt)
+{
+ /*
+ * Currently this function is not used that often but if that changes a
+ * lookup table could make it a lot faster.
+ */
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return 1;
+
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return 8;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return 12;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return 16;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return 24;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 32;
+
+ default:
+ b2r2_log_err("%s: Internal error! Format %#x not recognized.\n",
+ __func__, fmt);
+ return 32;
+ }
+}
+
+int b2r2_get_fmt_y_bpp(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 8;
+
+ default:
+ b2r2_log_err("%s: Internal error! Non YCbCr format supplied.\n",
+ __func__);
+ return 8;
+ }
+}
+
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+u32 b2r2_calc_pitch_from_width(s32 width, enum b2r2_blt_fmt fmt)
+{
+ if (b2r2_is_single_plane_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_bpp(fmt), 8);
+ } else if (b2r2_is_ycbcrsp_fmt(fmt) || b2r2_is_ycbcrp_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_y_bpp(fmt), 8);
+ } else {
+ b2r2_log_err("%s: Internal error! "
+ "Pitchless format supplied.\n",
+ __func__);
+ return 0;
+ }
+}
+
+u32 b2r2_get_img_pitch(struct b2r2_blt_img *img)
+{
+ if (img->pitch != 0)
+ return img->pitch;
+ else
+ return b2r2_calc_pitch_from_width(img->width, img->fmt);
+}
+
+s32 b2r2_get_img_size(struct b2r2_blt_img *img)
+{
+ if (b2r2_is_single_plane_fmt(img->fmt)) {
+ return (s32)b2r2_get_img_pitch(img) * img->height;
+ } else if (b2r2_is_ycbcrsp_fmt(img->fmt) ||
+ b2r2_is_ycbcrp_fmt(img->fmt)) {
+ s32 y_plane_size;
+
+ y_plane_size = (s32)b2r2_get_img_pitch(img) * img->height;
+
+ if (b2r2_is_ycbcr420_fmt(img->fmt)) {
+ return y_plane_size + y_plane_size / 2;
+ } else if (b2r2_is_ycbcr422_fmt(img->fmt)) {
+ return y_plane_size * 2;
+ } else if (b2r2_is_ycbcr444_fmt(img->fmt)) {
+ return y_plane_size * 3;
+ } else {
+ b2r2_log_err("%s: Internal error! "
+ "Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+ } else if (b2r2_is_mb_fmt(img->fmt)) {
+ return (img->width * img->height *
+ b2r2_get_fmt_bpp(img->fmt)) / 8;
+ } else {
+ b2r2_log_err("%s: Internal error! "
+ "Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+}
+
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor)
+{
+ s32 quotient = dividend / divisor;
+ if (dividend % divisor != 0)
+ quotient++;
+
+ return quotient;
+}
+
+bool b2r2_is_aligned(s32 value, s32 alignment)
+{
+ return value % alignment == 0;
+}
+
+s32 b2r2_align_up(s32 value, s32 alignment)
+{
+ s32 remainder = abs(value) % abs(alignment);
+ s32 value_to_add;
+
+ if (remainder > 0) {
+ if (value >= 0)
+ value_to_add = alignment - remainder;
+ else
+ value_to_add = remainder;
+ } else {
+ value_to_add = 0;
+ }
+
+ return value + value_to_add;
+}
diff --git a/drivers/video/b2r2/b2r2_utils.h b/drivers/video/b2r2/b2r2_utils.h
new file mode 100644
index 00000000000..745d71c0126
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+
+#include <video/b2r2_blt.h>
+
+extern const s32 b2r2_s32_max;
+
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect);
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect);
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2, struct b2r2_blt_rect *intersection);
+
+int b2r2_get_fmt_bpp(enum b2r2_blt_fmt fmt);
+int b2r2_get_fmt_y_bpp(enum b2r2_blt_fmt fmt);
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt);
+
+/*
+ * Rounds up if an invalid width causes the pitch to be non-byte-aligned.
+ */
+u32 b2r2_calc_pitch_from_width(s32 width, enum b2r2_blt_fmt fmt);
+u32 b2r2_get_img_pitch(struct b2r2_blt_img *img);
+s32 b2r2_get_img_size(struct b2r2_blt_img *img);
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor);
+bool b2r2_is_aligned(s32 value, s32 alignment);
+s32 b2r2_align_up(s32 value, s32 alignment);
+
+#endif
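[Editorial worked example, not part of the patch, showing what the pitch and size helpers above yield; the image values are illustrative only.]

/*
 * For a 640x480 B2R2_BLT_FMT_YUV420_PACKED_PLANAR image with pitch == 0:
 *
 *   b2r2_calc_pitch_from_width(640, fmt) = 640   (8 bpp luma => 1 byte/pixel)
 *   b2r2_get_img_size(&img)              = 640 * 480 + (640 * 480) / 2
 *                                        = 460800 bytes
 *
 * For a single-plane format such as B2R2_BLT_FMT_32_BIT_ARGB8888 the pitch
 * would instead be 640 * 4 = 2560 bytes and the size pitch * height.
 */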
diff --git a/drivers/video/mcde/Kconfig b/drivers/video/mcde/Kconfig
new file mode 100644
index 00000000000..cb88a66d370
--- /dev/null
+++ b/drivers/video/mcde/Kconfig
@@ -0,0 +1,96 @@
+config FB_MCDE
+ tristate "MCDE support"
+ depends on FB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select HWMEM
+ ---help---
+ This enables support for the MCDE-based frame buffer driver.
+
+ Please read the file <file:Documentation/fb/mcde.txt>
+
+config FB_MCDE_DEBUG
+ bool "MCDE debug messages"
+ depends on FB_MCDE
+ ---help---
+ Say Y here if you want the MCDE driver to output debug messages.
+
+config FB_MCDE_VDEBUG
+ bool "MCDE verbose debug messages"
+ depends on FB_MCDE_DEBUG
+ ---help---
+ Say Y here if you want the MCDE driver to output more debug messages.
+
+config MCDE_FB_AVOID_REALLOC
+ bool "MCDE early allocate framebuffer"
+ default n
+ depends on FB_MCDE
+ ---help---
+ If you say Y here, the maximum frame buffer size is allocated once
+ and used for all resolutions. If you say N here, the frame buffer is
+ reallocated when the resolution is changed. This reallocation might
+ fail because of fragmented memory. Note that this memory is never
+ deallocated while the MCDE frame buffer is in use.
+
+config MCDE_DISPLAY_DSI
+ bool "Support for DSI displays within MCDE"
+ depends on FB_MCDE
+ default y
+
+menu "MCDE DSI displays"
+ depends on MCDE_DISPLAY_DSI
+
+config MCDE_DISPLAY_GENERIC_DSI
+ tristate "Generic DSI display driver"
+
+config MCDE_DISPLAY_SAMSUNG_S6D16D0
+ bool "Samsung S6D16D0 DSI display driver"
+ ---help---
+ Say Y if you have a TPO Taal or Blackpearl display panel.
+
+config MCDE_DISPLAY_SONY_ACX424AKP_DSI
+ tristate "Sony acx424akp DSI display driver"
+
+config MCDE_DISPLAY_AV8100
+ tristate "AV8100 HDMI/CVBS display driver"
+ select AV8100
+
+config MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ bool "HDMI_FB_AUTO_CREATE"
+ default y
+ depends on MCDE_DISPLAY_AV8100
+ ---help---
+ Say Y if you want the HDMI frame buffer to be created at start.
+ Say N if you want the HDMI frame buffer to be created when the HDMI
+ cable is plugged in (requires the user-space HDMIservice).
+
+endmenu
+
+config MCDE_DISPLAY_DPI
+ bool "Support for DPI displays within MCDE"
+ depends on FB_MCDE
+ default n
+ ---help---
+ Enable this option to choose which DPI display drivers for MCDE to include.
+
+ DPI (Display Pixel Interface) is a MIPI Alliance standard used for
+ active-matrix LCDs. The DPI uses parallel data lines.
+
+menu "MCDE DPI displays"
+ depends on MCDE_DISPLAY_DPI
+
+config MCDE_DISPLAY_VUIB500_DPI
+ tristate "DPI display driver for the VUIB500 board"
+ ---help---
+ The VUIB500 is an ST-Ericsson user interface board.
+
+endmenu
+
+config MCDE_DISPLAY_AB8500_DENC
+ tristate "AB8500 CVBS display driver"
+ depends on FB_MCDE
+ select AB8500_DENC
+
+
diff --git a/drivers/video/mcde/Makefile b/drivers/video/mcde/Makefile
new file mode 100644
index 00000000000..82a78c2542a
--- /dev/null
+++ b/drivers/video/mcde/Makefile
@@ -0,0 +1,23 @@
+mcde-objs += mcde_mod.o
+mcde-objs += mcde_hw.o
+mcde-objs += mcde_dss.o
+mcde-objs += mcde_display.o
+mcde-objs += mcde_bus.o
+mcde-objs += mcde_fb.o
+mcde-objs += mcde_debugfs.o
+obj-$(CONFIG_FB_MCDE) += mcde.o
+
+obj-$(CONFIG_MCDE_DISPLAY_GENERIC_DSI) += display-generic_dsi.o
+obj-$(CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0) += display-samsung_s6d16d0.o
+obj-$(CONFIG_MCDE_DISPLAY_SONY_ACX424AKP_DSI) += display-sony_acx424akp_dsi.o
+obj-$(CONFIG_MCDE_DISPLAY_VUIB500_DPI) += display-vuib500-dpi.o
+obj-$(CONFIG_MCDE_DISPLAY_AB8500_DENC) += display-ab8500.o
+obj-$(CONFIG_MCDE_DISPLAY_AV8100) += display-av8100.o
+obj-$(CONFIG_DISPLAY_FICTIVE) += display-fictive.o
+
+ifdef CONFIG_FB_MCDE_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+ifdef CONFIG_FB_MCDE_VDEBUG
+EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
diff --git a/drivers/video/mcde/display-ab8500.c b/drivers/video/mcde/display-ab8500.c
new file mode 100644
index 00000000000..f465b0617c7
--- /dev/null
+++ b/drivers/video/mcde/display-ab8500.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 display driver
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/ab8500/denc.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-ab8500.h>
+
+#define AB8500_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__)
+
+#define SDTV_PIXCLOCK 37037
+
+/*
+ * PAL:
+ * Total nr of active lines: 576
+ * Total nr of blanking lines: 49
+ * total: 625
+ */
+#define PAL_HBP 132
+#define PAL_HFP 12
+#define PAL_VBP_FIELD_1 22
+#define PAL_VBP_FIELD_2 23
+#define PAL_VFP_FIELD_1 2
+#define PAL_VFP_FIELD_2 2
+
+/*
+ * NTSC (ITU-R BT.470-5):
+ * Total nr of active lines: 486
+ * Total nr of blanking lines: 39
+ * total: 525
+ */
+#define NTSC_ORG_HBP 122
+#define NTSC_ORG_HFP 16
+#define NTSC_ORG_VBP_FIELD_1 16
+#define NTSC_ORG_VBP_FIELD_2 17
+#define NTSC_ORG_VFP_FIELD_1 3
+#define NTSC_ORG_VFP_FIELD_2 3
+
+/*
+ * NTSC (DV variant):
+ * Total nr of active lines: 480
+ * Total nr of blanking lines: 45
+ * total: 525
+ */
+#define NTSC_HBP 122
+#define NTSC_HFP 16
+#define NTSC_VBP_FIELD_1 19
+#define NTSC_VBP_FIELD_2 20
+#define NTSC_VFP_FIELD_1 3
+#define NTSC_VFP_FIELD_2 3
+
+struct display_driver_data {
+ struct ab8500_denc_conf denc_conf;
+ struct platform_device *denc_dev;
+ int nr_regulators;
+ struct regulator **regulator;
+};
+
+static int try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode);
+static int on_first_update(struct mcde_display_device *ddev);
+static int display_update(struct mcde_display_device *ddev,
+ bool tripple_buffer);
+
+static int __devinit ab8500_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ int i;
+ struct ab8500_display_platform_data *pdata = ddev->dev.platform_data;
+ struct display_driver_data *driver_data;
+
+ AB8500_DISP_TRACE;
+
+ if (pdata == NULL) {
+ dev_err(&ddev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+ if (ddev->port->type != MCDE_PORTTYPE_DPI) {
+ dev_err(&ddev->dev, "%s:Invalid port type %d\n", __func__,
+ ddev->port->type);
+ return -EINVAL;
+ }
+
+ driver_data = (struct display_driver_data *)
+ kzalloc(sizeof(struct display_driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(&ddev->dev, "Failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+ driver_data->denc_dev = ab8500_denc_get_device();
+ if (!driver_data->denc_dev) {
+ dev_err(&ddev->dev, "Failed to get DENC device\n");
+ ret = -ENODEV;
+ goto dev_get_failed;
+ }
+
+ driver_data->regulator = kzalloc(pdata->nr_regulators *
+ sizeof(struct regulator *), GFP_KERNEL);
+ if (!driver_data->regulator) {
+ dev_err(&ddev->dev, "Failed to allocate regulator list\n");
+ ret = -ENOMEM;
+ goto reg_alloc_failed;
+ }
+ for (i = 0; i < pdata->nr_regulators; i++) {
+ driver_data->regulator[i] = regulator_get(&ddev->dev,
+ pdata->regulator_id[i]);
+ if (IS_ERR(driver_data->regulator[i])) {
+ ret = PTR_ERR(driver_data->regulator[i]);
+ dev_warn(&ddev->dev, "%s:Failed to get regulator %s\n",
+ __func__, pdata->regulator_id[i]);
+ goto regulator_get_failed;
+ }
+ }
+ driver_data->nr_regulators = pdata->nr_regulators;
+
+ dev_set_drvdata(&ddev->dev, driver_data);
+
+ ddev->try_video_mode = try_video_mode;
+ ddev->set_video_mode = set_video_mode;
+ ddev->set_power_mode = set_power_mode;
+ ddev->on_first_update = on_first_update;
+ ddev->update = display_update;
+
+ return 0;
+
+regulator_get_failed:
+ for (i--; i >= 0; i--)
+ regulator_put(driver_data->regulator[i]);
+ kfree(driver_data->regulator);
+ driver_data->regulator = NULL;
+reg_alloc_failed:
+ ab8500_denc_put_device(driver_data->denc_dev);
+dev_get_failed:
+ kfree(driver_data);
+ return ret;
+}
+
+static int __devexit ab8500_remove(struct mcde_display_device *ddev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ if (driver_data->regulator) {
+ int i;
+ for (i = driver_data->nr_regulators - 1; i >= 0; i--)
+ regulator_put(driver_data->regulator[i]);
+ kfree(driver_data->regulator);
+ driver_data->regulator = NULL;
+ driver_data->nr_regulators = 0;
+ }
+ ab8500_denc_put_device(driver_data->denc_dev);
+ kfree(driver_data);
+ return 0;
+}
+
+static int ab8500_resume(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ AB8500_DISP_TRACE;
+
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s: Failed to resume display\n",
+ __func__);
+
+ return ret;
+}
+
+static int ab8500_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret = 0;
+ AB8500_DISP_TRACE;
+
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s: Failed to suspend display\n",
+ __func__);
+
+ return ret;
+}
+
+
+static struct mcde_display_driver ab8500_driver = {
+ .probe = ab8500_probe,
+ .remove = ab8500_remove,
+ .suspend = ab8500_suspend,
+ .resume = ab8500_resume,
+ .driver = {
+ .name = "mcde_tv_ab8500",
+ },
+};
+
+static void print_vmode(struct mcde_video_mode *vmode)
+{
+ pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres);
+ pr_debug(" pixclock: %d\n", vmode->pixclock);
+ pr_debug(" hbp: %d\n", vmode->hbp);
+ pr_debug(" hfp: %d\n", vmode->hfp);
+ pr_debug(" vbp: %d\n", vmode->vbp);
+ pr_debug(" vfp: %d\n", vmode->vfp);
+ pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false");
+}
+
+static int try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ AB8500_DISP_TRACE;
+
+ if (ddev == NULL || video_mode == NULL) {
+ dev_warn(&ddev->dev, "%s:ddev = NULL or video_mode = NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (video_mode->xres != 720) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ /* TODO: move this part to MCDE: mcde_dss_try_video_mode? */
+ /* check for PAL */
+ switch (video_mode->yres) {
+ case 576:
+ /* set including SAV/EAV: */
+ video_mode->hbp = PAL_HBP;
+ video_mode->hfp = PAL_HFP;
+ video_mode->vbp = PAL_VBP_FIELD_1 + PAL_VBP_FIELD_2;
+ video_mode->vfp = PAL_VFP_FIELD_1 + PAL_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ case 480:
+ /* set including SAV/EAV */
+ video_mode->hbp = NTSC_HBP;
+ video_mode->hfp = NTSC_HFP;
+ video_mode->vbp = NTSC_VBP_FIELD_1 + NTSC_VBP_FIELD_2;
+ video_mode->vfp = NTSC_VFP_FIELD_1 + NTSC_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ case 486:
+ /* set including SAV/EAV */
+ video_mode->hbp = NTSC_ORG_HBP;
+ video_mode->hfp = NTSC_ORG_HFP;
+ video_mode->vbp = NTSC_ORG_VBP_FIELD_1 + NTSC_ORG_VBP_FIELD_2;
+ video_mode->vfp = NTSC_ORG_VFP_FIELD_1 + NTSC_ORG_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ default:
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ print_vmode(video_mode);
+
+ return 0;
+
+}
+
+static int set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res;
+ struct ab8500_display_platform_data *pdata = ddev->dev.platform_data;
+ struct display_driver_data *driver_data =
+ (struct display_driver_data *)dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ if (ddev == NULL || video_mode == NULL) {
+ dev_warn(&ddev->dev, "%s:ddev = NULL or video_mode = NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ ddev->video_mode = *video_mode;
+
+ if (video_mode->xres != 720) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ /* check for PAL BDGHI and N */
+ switch (video_mode->yres) {
+ case 576:
+ driver_data->denc_conf.TV_std = TV_STD_PAL_BDGHI;
+ /* TODO: how to choose LOW DEF FILTER */
+ driver_data->denc_conf.cr_filter = TV_CR_PAL_HIGH_DEF_FILTER;
+ /* TODO: PAL N (e.g. uses a setup of 7.5 IRE) */
+ driver_data->denc_conf.black_level_setup = false;
+ break;
+ case 480: /* NTSC, PAL M DV variant */
+ case 486: /* NTSC, PAL M original */
+ /* TODO: PAL M */
+ driver_data->denc_conf.TV_std = TV_STD_NTSC_M;
+ /* TODO: how to choose LOW DEF FILTER */
+ driver_data->denc_conf.cr_filter = TV_CR_NTSC_HIGH_DEF_FILTER;
+ driver_data->denc_conf.black_level_setup = true;
+ break;
+ default:
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+
+ driver_data->denc_conf.progressive = !video_mode->interlaced;
+ driver_data->denc_conf.act_output = true;
+ driver_data->denc_conf.test_pattern = false;
+ driver_data->denc_conf.partial_blanking = true;
+ driver_data->denc_conf.blank_all = false;
+ driver_data->denc_conf.suppress_col = false;
+ driver_data->denc_conf.phase_reset_mode = TV_PHASE_RST_MOD_DISABLE;
+ driver_data->denc_conf.dac_enable = false;
+ driver_data->denc_conf.act_dc_output = true;
+
+ set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (pdata->rgb_2_yCbCr_transform)
+ mcde_chnl_set_col_convert(ddev->chnl_state,
+ pdata->rgb_2_yCbCr_transform,
+ MCDE_CONVERT_RGB_2_YCBCR);
+ mcde_chnl_stop_flow(ddev->chnl_state);
+ res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode);
+ if (res < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n",
+ __func__);
+
+ return res;
+ }
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+
+ return 0;
+}
+
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret;
+ int i;
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ dev_dbg(&ddev->dev, "off -> standby\n");
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ goto error;
+ }
+ if (driver_data->regulator) {
+ for (i = 0; i < driver_data->nr_regulators; i++) {
+ ret = regulator_enable(
+ driver_data->regulator[i]);
+ if (ret)
+ goto off_to_standby_failed;
+ dev_dbg(&ddev->dev, "regulator %d on\n", i);
+ }
+ }
+ ab8500_denc_power_up(driver_data->denc_dev);
+ ab8500_denc_reset(driver_data->denc_dev, true);
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+ dev_dbg(&ddev->dev, "standby -> on\n");
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ dev_dbg(&ddev->dev, "on -> standby\n");
+ ab8500_denc_reset(driver_data->denc_dev, false);
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ bool error = false;
+ dev_dbg(&ddev->dev, "standby -> off\n");
+ if (driver_data->regulator) {
+ for (i = 0; i < driver_data->nr_regulators; i++) {
+ ret = regulator_disable(
+ driver_data->regulator[i]);
+ /* continue in case of an error */
+ error |= (ret != 0);
+ dev_dbg(&ddev->dev, "regulator %d off\n", i);
+ }
+ }
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ error |= (ret != 0);
+ }
+ if (error) {
+ /* the latest error code is returned */
+ goto error;
+ }
+ memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode));
+ ab8500_denc_power_down(driver_data->denc_dev);
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return 0;
+
+ /* In case of an error, try to leave in off-state */
+off_to_standby_failed:
+ for (i--; i >= 0; i--)
+ regulator_disable(driver_data->regulator[i]);
+ ddev->platform_disable(ddev);
+
+error:
+ dev_err(&ddev->dev, "Failed to set power mode");
+ return ret;
+}
+
+static int on_first_update(struct mcde_display_device *ddev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ ab8500_denc_conf(driver_data->denc_dev, &driver_data->denc_conf);
+ ab8500_denc_conf_plug_detect(driver_data->denc_dev, true, false,
+ TV_PLUG_TIME_2S);
+ ab8500_denc_mask_int_plug_det(driver_data->denc_dev, false, false);
+ ddev->first_update = false;
+ return 0;
+}
+
+static int display_update(struct mcde_display_device *ddev, bool tripple_buffer)
+{
+ int ret;
+
+ if (ddev->first_update)
+ on_first_update(ddev);
+ if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) {
+ ret = set_power_mode(ddev, MCDE_DISPLAY_PM_ON);
+ if (ret < 0)
+ goto error;
+ }
+ ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area,
+ tripple_buffer);
+ if (ret < 0)
+ goto error;
+out:
+ return ret;
+error:
+ dev_warn(&ddev->dev, "%s:Failed to set power mode to on\n", __func__);
+ goto out;
+}
+
+/* Module init */
+static int __init mcde_display_tvout_ab8500_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ return mcde_display_driver_register(&ab8500_driver);
+}
+late_initcall(mcde_display_tvout_ab8500_init);
+
+static void __exit mcde_display_tvout_ab8500_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ mcde_display_driver_unregister(&ab8500_driver);
+}
+module_exit(mcde_display_tvout_ab8500_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE TVout through AB8500 display driver");
diff --git a/drivers/video/mcde/display-av8100.c b/drivers/video/mcde/display-av8100.c
new file mode 100644
index 00000000000..3ff73f3e4eb
--- /dev/null
+++ b/drivers/video/mcde/display-av8100.c
@@ -0,0 +1,1577 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson HDMI display driver
+ *
+ * Author: Per Persson <per-xb-persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-av8100.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+
+#define SWITCH_HELPSTR ", 0=HDMI, 1=SDTV, 2=DVI\n"
+
+/* AVI Infoframe */
+#define AVI_INFOFRAME_DATA_SIZE 13
+#define AVI_INFOFRAME_TYPE 0x82
+#define AVI_INFOFRAME_VERSION 0x02
+#define AVI_INFOFRAME_DB1 0x10 /* Active Information present */
+#define AVI_INFOFRAME_DB2 0x08 /* Active Portion Aspect ratio */
+
+struct cea_vesa_video_mode {
+ u32 cea;
+ u32 vesa_cea_nr;
+ struct mcde_video_mode *video_mode;
+};
+
+static int hdmi_try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode);
+static int hdmi_set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode);
+static int hdmi_set_pixel_format(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format);
+static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr);
+
+static ssize_t show_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_disponoff(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_disponoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_vesacea(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_timing(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_timing(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_stayalive(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static DEVICE_ATTR(disponoff, S_IRUGO | S_IWUSR, show_disponoff,
+ store_disponoff);
+static DEVICE_ATTR(vesacea, S_IRUGO, show_vesacea, NULL);
+static DEVICE_ATTR(timing, S_IRUGO | S_IWUSR, show_timing, store_timing);
+static DEVICE_ATTR(stayalive, S_IWUSR, NULL, store_stayalive);
+
+static DEVICE_ATTR(hdmisdtvswitch, S_IRUGO | S_IWUSR, show_hdmisdtvswitch,
+ store_hdmisdtvswitch);
+static DEVICE_ATTR(input_pixel_format, S_IRUGO | S_IWUSR,
+ show_input_pixel_format, store_input_pixel_format);
+
+static ssize_t show_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ int index;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ sprintf(buf, "%1x%s", mdev->port->hdmi_sdtv_switch, SWITCH_HELPSTR);
+ index = 1 + strlen(SWITCH_HELPSTR) + 1;
+
+ return index;
+}
+
+static ssize_t store_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (count > 0) {
+ if ((*buf == 0) || (*buf == '0')) {
+ dev_dbg(dev, "hdmi/sdtv switch = hdmi\n");
+ mdev->port->hdmi_sdtv_switch = HDMI_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_HDMI;
+ mdev->native_y_res = NATIVE_YRES_HDMI;
+ } else if ((*buf == 1) || (*buf == '1')) {
+ dev_dbg(dev, "hdmi/sdtv switch = sdtv\n");
+ mdev->port->hdmi_sdtv_switch = SDTV_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_SDTV;
+ mdev->native_y_res = NATIVE_YRES_SDTV;
+ } else if ((*buf == 2) || (*buf == '2')) {
+ dev_dbg(dev, "hdmi/sdtv switch = dvi\n");
+ mdev->port->hdmi_sdtv_switch = DVI_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_HDMI;
+ mdev->native_y_res = NATIVE_YRES_HDMI;
+ }
+ /* implicitly read by a memcmp in dss */
+ mdev->video_mode.force_update = true;
+ }
+
+ return count;
+}
+
+static ssize_t show_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ return sprintf(buf, "%d\n", ddev->port->pixel_format);
+}
+
+static ssize_t store_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (count > 0) {
+ unsigned long input;
+ if (strict_strtoul(buf, 10, &input) != 0)
+ return -EINVAL;
+ switch (input) {
+ /* intentional fall through */
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ ddev->port->pixel_format = input;
+ break;
+ default:
+ dev_warn(&ddev->dev, "invalid format (%ld)\n",
+ input);
+ return -EINVAL;
+ break;
+ }
+ /* implicitly read by a memcmp in dss */
+ ddev->video_mode.force_update = true;
+ driver_data->update_port_pixel_format = true;
+ }
+
+ return count;
+}
+
+static ssize_t show_disponoff(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (ddev->fbi && driver_data->fbdevname) {
+ dev_dbg(dev, "name:%s\n", driver_data->fbdevname);
+ strcpy(buf, driver_data->fbdevname);
+ return strlen(driver_data->fbdevname) + 1;
+ }
+ return 0;
+}
+
+static ssize_t store_disponoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ bool enable = false;
+ u8 cea = 0;
+ u8 vesa_cea_nr = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if ((count != DISPONOFF_SIZE) && (count != DISPONOFF_SIZE + 1))
+ return -EINVAL;
+
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ cea = (hex_to_bin(buf[2]) << 4) + hex_to_bin(buf[3]);
+ vesa_cea_nr = (hex_to_bin(buf[4]) << 4) + hex_to_bin(buf[5]);
+ dev_dbg(dev, "enable:%d cea:%d nr:%d\n", enable, cea, vesa_cea_nr);
+
+ hdmi_fb_onoff(mdev, enable, cea, vesa_cea_nr);
+
+ return count;
+}
+
+static ssize_t show_timing(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ struct mcde_video_mode *video_mode;
+ int index;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ index = 0;
+ if (driver_data->video_mode) {
+ video_mode = driver_data->video_mode;
+ memcpy(buf + index, &video_mode->xres, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->yres, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->pixclock, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->hbp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->hfp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->vbp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->vfp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->interlaced, sizeof(u32));
+ index += sizeof(u32);
+ }
+ return index;
+}
+
+static ssize_t store_timing(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (count != TIMING_SIZE)
+ return -EINVAL;
+
+ driver_data->video_mode = video_mode_get(ddev, *buf, *(buf + 1));
+
+ return count;
+}
+
+static ssize_t store_stayalive(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ if (count != STAYALIVE_SIZE)
+ return -EINVAL;
+
+ if ((*buf == 1) || (*buf == '1'))
+ ddev->stay_alive = true;
+ else
+ ddev->stay_alive = false;
+
+ dev_dbg(dev, "%s %d\n", __func__, ddev->stay_alive);
+
+ return count;
+}
+
+static int ceanr_convert(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, int buffering,
+ u16 *w, u16 *h, u16 *vw, u16 *vh)
+{
+ struct mcde_video_mode *video_mode;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ video_mode = video_mode_get(ddev, cea, vesa_cea_nr);
+ if (video_mode) {
+ *w = video_mode->xres;
+ *h = video_mode->yres;
+ *vw = video_mode->xres;
+ *vh = video_mode->yres * buffering;
+ dev_dbg(&ddev->dev, "cea:%d nr:%d found\n",
+ cea, vesa_cea_nr);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Supported HDMI modes */
+static struct mcde_video_mode video_modes_supp_hdmi[] = {
+ /* 0 CEA #1 640_480_60_P */
+ {
+ .xres = 640, .yres = 480,
+ .pixclock = 39682,
+ .hbp = 112, .hfp = 48,
+ .vbp = 33, .vfp = 12
+ },
+ /* 1 720_480_60_P */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 37000,
+ .hbp = 104, .hfp = 34,
+ .vbp = 30, .vfp = 15
+ },
+ /* 2 720_576_50_P */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 37037,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5
+ },
+ /* 3 1280_720_60_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 256, .hfp = 114,
+ .vbp = 20, .vfp = 10
+ },
+ /* 4 1280_720_50_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 440,
+ .vbp = 25, .vfp = 5
+ },
+ /* 5 1280_720_30_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 1760,
+ .vbp = 20, .vfp = 10
+ },
+ /* 6 1280_720_24_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 16835,
+ .hbp = 260, .hfp = 1760,
+ .vbp = 20, .vfp = 10
+ },
+ /* 7 1280_720_25_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 2420,
+ .vbp = 20, .vfp = 10
+ },
+ /* 8 1920_1080_30_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 189, .hfp = 91,
+ .vbp = 36, .vfp = 9
+ },
+ /* 9 1920_1080_24_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 170, .hfp = 660,
+ .vbp = 36, .vfp = 9
+ },
+ /* 10 1920_1080_25_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 528,
+ .vbp = 36, .vfp = 9
+ },
+ /* 11 720_480_60_I */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 74074,
+ .hbp = 126, .hfp = 12,
+ .vbp = 44, .vfp = 1,
+ .interlaced = true,
+ },
+ /* 12 720_576_50_I */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 74074,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5,
+ .interlaced = true,
+ },
+ /* 13 1920_1080_50_I */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 528,
+ .vbp = 20, .vfp = 25,
+ .interlaced = true,
+ },
+ /* 14 1920_1080_60_I */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 88,
+ .vbp = 20, .vfp = 25,
+ .interlaced = true,
+ },
+ /* 15 VESA #9 800_600_60_P */
+ {
+ .xres = 800, .yres = 600,
+ .pixclock = 25000,
+ .hbp = 168, .hfp = 88,
+ .vbp = 23, .vfp = 5,
+ .interlaced = false,
+ },
+ /* 16 VESA #14 848_480_60_P */
+ {
+ .xres = 848, .yres = 480,
+ .pixclock = 29630,
+ .hbp = 128, .hfp = 112,
+ .vbp = 23, .vfp = 14,
+ .interlaced = false,
+ },
+ /* 17 VESA #16 1024_768_60_P */
+ {
+ .xres = 1024, .yres = 768,
+ .pixclock = 15385,
+ .hbp = 160, .hfp = 160,
+ .vbp = 29, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 18 VESA #22 1280_768_60_P */
+ {
+ .xres = 1280, .yres = 768,
+ .pixclock = 14652,
+ .hbp = 80, .hfp = 80,
+ .vbp = 12, .vfp = 10,
+ .interlaced = false,
+ },
+ /* 19 VESA #23 1280_768_60_P */
+ {
+ .xres = 1280, .yres = 768,
+ .pixclock = 12579,
+ .hbp = 192, .hfp = 192,
+ .vbp = 20, .vfp = 10,
+ .interlaced = false,
+ },
+ /* 20 VESA #27 1280_800_60_P */
+ {
+ .xres = 1280, .yres = 800,
+ .pixclock = 14085,
+ .hbp = 80, .hfp = 80,
+ .vbp = 14, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 21 VESA #28 1280_800_60_P */
+ {
+ .xres = 1280, .yres = 800,
+ .pixclock = 11976,
+ .hbp = 200, .hfp = 200,
+ .vbp = 22, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 22 VESA #39 1360_768_60_P */
+ {
+ .xres = 1360, .yres = 768,
+ .pixclock = 11696,
+ .hbp = 176, .hfp = 256,
+ .vbp = 18, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 23 VESA #81 1366_768_60_P */
+ {
+ .xres = 1366, .yres = 768,
+ .pixclock = 11662,
+ .hbp = 213, .hfp = 213,
+ .vbp = 24, .vfp = 6,
+ .interlaced = false,
+ },
+};
+
+/* Supported TVout modes */
+static struct mcde_video_mode video_modes_supp_sdtv[] = {
+ /* 720_480_60_I */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 74074,
+ .hbp = 126, .hfp = 12,
+ .vbp = 44, .vfp = 1,
+ .interlaced = true,
+ },
+ /* 720_576_50_I */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 74074,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5,
+ .interlaced = true,
+ },
+};
+
+static struct cea_vesa_video_mode cea_vesa_video_mode[] = {
+ /* 640_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 1,
+ .video_mode = &video_modes_supp_hdmi[0],
+ },
+ /* 720_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 2,
+ .video_mode = &video_modes_supp_hdmi[1],
+ },
+ /* 720_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 3,
+ .video_mode = &video_modes_supp_hdmi[1],
+ },
+ /* 720_576_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 17,
+ .video_mode = &video_modes_supp_hdmi[2],
+ },
+ /* 720_576_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 18,
+ .video_mode = &video_modes_supp_hdmi[2],
+ },
+ /* 1280_720_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 4,
+ .video_mode = &video_modes_supp_hdmi[3],
+ },
+ /* 1280_720_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 19,
+ .video_mode = &video_modes_supp_hdmi[4],
+ },
+ /* 1280_720_30_P */
+ {
+ .cea = 1, .vesa_cea_nr = 62,
+ .video_mode = &video_modes_supp_hdmi[5],
+ },
+ /* 1280_720_24_P */
+ {
+ .cea = 1, .vesa_cea_nr = 60,
+ .video_mode = &video_modes_supp_hdmi[6],
+ },
+ /* 1280_720_25_P */
+ {
+ .cea = 1, .vesa_cea_nr = 61,
+ .video_mode = &video_modes_supp_hdmi[7],
+ },
+ /* 1920_1080_30_P */
+ {
+ .cea = 1, .vesa_cea_nr = 34,
+ .video_mode = &video_modes_supp_hdmi[8],
+ },
+ /* 1920_1080_24_P */
+ {
+ .cea = 1, .vesa_cea_nr = 32,
+ .video_mode = &video_modes_supp_hdmi[9],
+ },
+ /* 1920_1080_25_P */
+ {
+ .cea = 1, .vesa_cea_nr = 33,
+ .video_mode = &video_modes_supp_hdmi[10],
+ },
+ /* 720_480_60_I */
+ {
+ .cea = 1, .vesa_cea_nr = 6,
+ .video_mode = &video_modes_supp_hdmi[11],
+ },
+ /* 720_480_60_I */
+ {
+ .cea = 1, .vesa_cea_nr = 7,
+ .video_mode = &video_modes_supp_hdmi[11],
+ },
+ /* 720_576_50_I */
+ {
+ .cea = 1, .vesa_cea_nr = 21,
+ .video_mode = &video_modes_supp_hdmi[12],
+ },
+ /* 720_576_50_I */
+ {
+ .cea = 1, .vesa_cea_nr = 22,
+ .video_mode = &video_modes_supp_hdmi[12],
+ },
+ /* 1920_1080_50_I */
+ {
+ .cea = 1, .vesa_cea_nr = 20,
+ .video_mode = &video_modes_supp_hdmi[13],
+ },
+ /* 1920_1080_60_I */
+ {
+ .cea = 1, .vesa_cea_nr = 5,
+ .video_mode = &video_modes_supp_hdmi[14],
+ },
+ /* VESA #4 640_480_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 4,
+ .video_mode = &video_modes_supp_hdmi[0],
+ },
+ /* VESA #9 800_600_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 9,
+ .video_mode = &video_modes_supp_hdmi[15],
+ },
+ /* VESA #14 848_480_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 14,
+ .video_mode = &video_modes_supp_hdmi[16],
+ },
+ /* VESA #16 1024_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 16,
+ .video_mode = &video_modes_supp_hdmi[17],
+ },
+ /* VESA #22 1280_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 22,
+ .video_mode = &video_modes_supp_hdmi[18],
+ },
+ /* VESA #23 1280_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 23,
+ .video_mode = &video_modes_supp_hdmi[19],
+ },
+ /* VESA #27 1280_800_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 27,
+ .video_mode = &video_modes_supp_hdmi[20],
+ },
+ /* VESA #28 1280_800_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 28,
+ .video_mode = &video_modes_supp_hdmi[21],
+ },
+ /* VESA #39 1360_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 39,
+ .video_mode = &video_modes_supp_hdmi[22],
+ },
+ /* VESA #81 1366_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 81,
+ .video_mode = &video_modes_supp_hdmi[23],
+ },
+};
+
+static ssize_t show_vesacea(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int findex;
+
+ dev_dbg(dev, "%s\n", __func__);
+
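+ /* Write one (cea, vesa_cea_nr) byte pair per supported mode */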
+ for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++) {
+ *(buf + findex * 2) = cea_vesa_video_mode[findex].cea;
+ *(buf + findex * 2 + 1) =
+ cea_vesa_video_mode[findex].vesa_cea_nr;
+ }
+ *(buf + findex * 2) = '\0';
+
+ return findex * 2 + 1;
+}
+
+static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr)
+{
+ int findex;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++)
+ if ((cea == cea_vesa_video_mode[findex].cea) &&
+ (vesa_cea_nr ==
+ cea_vesa_video_mode[findex].vesa_cea_nr)) {
+ dev_dbg(&ddev->dev, "cea:%d nr:%d\n", cea, vesa_cea_nr);
+ return cea_vesa_video_mode[findex].video_mode;
+ }
+
+ return NULL;
+}
+
+static u8 ceanr_get(struct mcde_display_device *ddev)
+{
+ int cnt;
+ int cea;
+ int vesa_cea_nr;
+ struct mcde_video_mode *vmode = &ddev->video_mode;
+ struct mcde_video_mode *vmode_try;
+
+ if (!vmode)
+ return 0;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
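+ /* Look for a CEA mode whose timings match the currently set video mode */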
+ for (cnt = 0; cnt < ARRAY_SIZE(cea_vesa_video_mode); cnt++) {
+ vmode_try = cea_vesa_video_mode[cnt].video_mode;
+ cea = cea_vesa_video_mode[cnt].cea;
+ vesa_cea_nr = cea_vesa_video_mode[cnt].vesa_cea_nr;
+
+ if (cea && vmode_try->xres == vmode->xres &&
+ vmode_try->yres == vmode->yres &&
+ vmode_try->pixclock == vmode->pixclock &&
+ vmode_try->hbp == vmode->hbp &&
+ vmode_try->hfp == vmode->hfp &&
+ vmode_try->vbp == vmode->vbp &&
+ vmode_try->vfp == vmode->vfp &&
+ vmode_try->interlaced == vmode->interlaced) {
+ dev_dbg(&ddev->dev, "ceanr:%d\n", vesa_cea_nr);
+ return vesa_cea_nr;
+ }
+ }
+
+ return 0;
+}
+
+#define AV8100_MAX_LEVEL 255
+
+static int hdmi_try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int index = 0;
+ int match_level = AV8100_MAX_LEVEL;
+ int found_index = -1;
+ struct mcde_video_mode *video_modes_supp;
+ int array_size;
+
+ if (ddev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+
+ if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ video_mode->interlaced = true;
+ video_modes_supp = video_modes_supp_sdtv;
+ array_size = ARRAY_SIZE(video_modes_supp_sdtv);
+ } else {
+ video_modes_supp = video_modes_supp_hdmi;
+ array_size = ARRAY_SIZE(video_modes_supp_hdmi);
+ }
+
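+ /* Search all supported modes; a lower match_level means a closer match */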
+ while (index < array_size) {
+ /* 1. Check if all parameters match */
+ if ((video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ ((video_mode->xres + video_mode->hbp +
+ video_mode->hfp) ==
+ (video_modes_supp[index].xres +
+ video_modes_supp[index].hbp +
+ video_modes_supp[index].hfp)) &&
+ ((video_mode->yres + video_mode->vbp + video_mode->vfp)
+ ==
+ (video_modes_supp[index].yres +
+ video_modes_supp[index].vbp +
+ video_modes_supp[index].vfp)) &&
+ (video_mode->pixclock ==
+ video_modes_supp[index].pixclock) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 1;
+ found_index = index;
+ break;
+ }
+
+ /* 2. Check if xres,yres,htot,vtot,interlaced match */
+ if ((match_level > 2) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ ((video_mode->xres + video_mode->hbp +
+ video_mode->hfp) ==
+ (video_modes_supp[index].xres +
+ video_modes_supp[index].hbp +
+ video_modes_supp[index].hfp)) &&
+ ((video_mode->yres + video_mode->vbp + video_mode->vfp)
+ ==
+ (video_modes_supp[index].yres +
+ video_modes_supp[index].vbp +
+ video_modes_supp[index].vfp)) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 2;
+ found_index = index;
+ }
+
+ /* 3. Check if xres,yres,pixelclock,interlaced match */
+ if ((match_level > 3) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced) &&
+ (video_mode->pixclock ==
+ video_modes_supp[index].pixclock)) {
+ match_level = 3;
+ found_index = index;
+ }
+
+ /* 4. Check if xres,yres,interlaced match */
+ if ((match_level > 4) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 4;
+ found_index = index;
+ }
+
+ index++;
+ }
+
+ if (found_index == -1) {
+ dev_dbg(&ddev->dev, "video_mode not accepted\n");
+ dev_dbg(&ddev->dev, "xres:%d yres:%d pixclock:%d hbp:%d hfp:%d "
+ "vfp:%d vbp:%d intlcd:%d\n",
+ video_mode->xres, video_mode->yres,
+ video_mode->pixclock,
+ video_mode->hbp, video_mode->hfp,
+ video_mode->vfp, video_mode->vbp,
+ video_mode->interlaced);
+ return -EINVAL;
+ }
+
+ memset(video_mode, 0, sizeof(struct mcde_video_mode));
+ memcpy(video_mode, &video_modes_supp[found_index],
+ sizeof(struct mcde_video_mode));
+
+ dev_dbg(&ddev->dev, "%s:HDMI video_mode %d chosen. Level:%d\n",
+ __func__, found_index, match_level);
+
+ return 0;
+}
+
+static int hdmi_set_video_mode(
+ struct mcde_display_device *dev, struct mcde_video_mode *video_mode)
+{
+ int ret;
+ union av8100_configuration av8100_config;
+ struct mcde_display_hdmi_platform_data *pdata;
+ struct display_driver_data *driver_data;
+ struct av8100_status status;
+
+ /* TODO check video_mode_params */
+ if (dev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = dev->dev.platform_data;
+ driver_data = dev_get_drvdata(&dev->dev);
+
+ dev_dbg(&dev->dev, "%s:\n", __func__);
+ dev_vdbg(&dev->dev, "%s:xres:%d yres:%d hbp:%d hfp:%d vbp:%d vfp:%d "
+ "interlaced:%d\n", __func__,
+ video_mode->xres,
+ video_mode->yres,
+ video_mode->hbp,
+ video_mode->hfp,
+ video_mode->vbp,
+ video_mode->vfp,
+ video_mode->interlaced);
+
+ if (driver_data->update_port_pixel_format) {
+ hdmi_set_pixel_format(dev, dev->pixel_format);
+ driver_data->update_port_pixel_format = false;
+ }
+
+ memset(&(dev->video_mode), 0, sizeof(struct mcde_video_mode));
+ memcpy(&(dev->video_mode), video_mode, sizeof(struct mcde_video_mode));
+
+ if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 &&
+ pdata->rgb_2_yCbCr_transform)
+ mcde_chnl_set_col_convert(dev->chnl_state,
+ pdata->rgb_2_yCbCr_transform,
+ MCDE_CONVERT_RGB_2_YCBCR);
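+
+ /* Stop the data flow before the new video mode is applied */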
+ mcde_chnl_stop_flow(dev->chnl_state);
+
+ ret = mcde_chnl_set_video_mode(dev->chnl_state, &dev->video_mode);
+ if (ret < 0) {
+ dev_warn(&dev->dev, "Failed to set video mode\n");
+ return ret;
+ }
+
+ status = av8100_status_get();
+ if (status.av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
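+ /* On AV8100 chip version 1, power the chip down first so it is powered up again cleanly below */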
+ if (av8100_ver_get() == AV8100_CHIPVER_1) {
+ if (status.av8100_state >= AV8100_OPMODE_STANDBY) {
+ /* Disable interrupts */
+ ret = av8100_disable_interrupt();
+ if (ret) {
+ dev_err(&dev->dev,
+ "%s:av8100_disable_interrupt failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_powerdown();
+ if (ret) {
+ dev_err(&dev->dev,
+ "av8100_powerdown failed\n");
+ return ret;
+ }
+
+ msleep(10);
+ }
+ }
+
+ /* Set to powerup with interrupts disabled */
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ ret = av8100_powerup();
+ if (ret) {
+ dev_err(&dev->dev, "av8100_powerup failed\n");
+ return ret;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_IDLE) {
+ ret = av8100_download_firmware(I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "av8100_download_firmware failed\n");
+ return ret;
+ }
+ }
+
+ if (av8100_disable_interrupt())
+ return -EFAULT;
+
+ /*
+ * Don't look at dev->port->hdmi_sdtv_switch; it states only which
+ * one should be started, not which one is currently working
+ */
+ if (av8100_conf_get(AV8100_COMMAND_HDMI, &av8100_config))
+ return -EFAULT;
+ if (av8100_config.hdmi_format.hdmi_mode == AV8100_HDMI_ON) {
+ /* Set HDMI mode to OFF */
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ av8100_config.hdmi_format.hdmi_format = AV8100_HDMI;
+ if (av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config))
+ return -EFAULT;
+
+ if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE))
+ return -EFAULT;
+ }
+ if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ if (av8100_config.denc_format.enable) {
+ /* Turn off DENC */
+ av8100_config.denc_format.enable = 0;
+ if (av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ if (av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL,
+ I2C_INTERFACE))
+ return -EFAULT;
+ }
+
+ /* Get current av8100 video output format */
+ ret = av8100_conf_get(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_get "
+ "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH)
+ av8100_config.video_output_format.video_output_cea_vesa =
+ dev->video_mode.yres == NATIVE_YRES_SDTV ?
+ AV8100_CEA21_22_576I_PAL_50HZ :
+ AV8100_CEA6_7_NTSC_60HZ;
+ else
+ av8100_config.video_output_format.video_output_cea_vesa =
+ av8100_video_output_format_get(
+ dev->video_mode.xres,
+ dev->video_mode.yres,
+ dev->video_mode.xres +
+ dev->video_mode.hbp + dev->video_mode.hfp,
+ dev->video_mode.yres +
+ dev->video_mode.vbp + dev->video_mode.vfp,
+ dev->video_mode.pixclock,
+ dev->video_mode.interlaced);
+
+ if (AV8100_VIDEO_OUTPUT_CEA_VESA_MAX ==
+ av8100_config.video_output_format.video_output_cea_vesa) {
+ dev_err(&dev->dev, "%s:video output format not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Get current av8100 video input format */
+ ret = av8100_conf_get(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_get "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set correct av8100 video input pixel format */
+ switch (dev->port->pixel_format) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ default:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB565;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB666;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB666P;
+ break;
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB888;
+ break;
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_YCBCR422;
+ break;
+ }
+
+ /* Set ui_x4 */
+ av8100_config.video_input_format.ui_x4 = dev->port->phy.dsi.ui;
+
+ /* Set TE_config */
+ switch (dev->port->sync_src) {
+ case MCDE_SYNCSRC_TE0:
+ av8100_config.video_input_format.TE_config = AV8100_TE_IT_LINE;
+ break;
+ case MCDE_SYNCSRC_TE1:
+ av8100_config.video_input_format.TE_config = AV8100_TE_GPIO_IT;
+ break;
+ case MCDE_SYNCSRC_TE_POLLING:
+ av8100_config.video_input_format.TE_config =
+ AV8100_TE_DSI_LANE; /* Only on DSI, no interrupts */
+ break;
+ case MCDE_SYNCSRC_OFF:
+ default:
+ av8100_config.video_input_format.TE_config = AV8100_TE_OFF;
+ break;
+ }
+
+ ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_w(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ if (dev->port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422)
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_RGB_TO_DENC;
+ else
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_YUV_TO_DENC;
+ } else if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422) {
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_YUV_TO_RGB;
+ } else {
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_INDENTITY;
+ }
+
+ ret = av8100_conf_prep(
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_configuration_prepare "
+ "AV8100_COMMAND_COLORSPACECONVERSION failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_w(
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_COLORSPACECONVERSION failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set video output format */
+ ret = av8100_conf_w(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "av8100_conf_w failed\n");
+ return ret;
+ }
+
+ /* Set audio input format */
+ ret = av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_AUDIO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ dev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+ dev->first_update = true;
+
+ return 0;
+}
+
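+/*
+ * Bit-rotation helpers used to build the palette that works around the
+ * AV8100 V2 YUV422 unpacking issue (see hdmi_set_port_pixel_format).
+ */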
+static u16 rotate_byte_left(u8 c, int nr)
+{
+ return (0xff & (c << nr)) | (0xff & (c >> (8 - nr)));
+}
+
+static u16 map_yv(u8 in)
+{
+ return rotate_byte_left(in, 3) << 4;
+}
+
+static u16 map_u(u8 in)
+{
+ return rotate_byte_left(in, 5) << 4;
+}
+
+static int hdmi_set_pixel_format(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format)
+{
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ ddev->pixel_format = format;
+
+ return 0;
+}
+
+static int hdmi_set_port_pixel_format(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ mcde_chnl_stop_flow(ddev->chnl_state);
+ ret = mcde_chnl_set_pixel_format(ddev->chnl_state,
+ ddev->port->pixel_format);
+
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s: Failed to set pixel format = %d\n",
+ __func__, ddev->port->pixel_format);
+ return ret;
+ }
+
+ if (ddev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 &&
+ av8100_ver_get() == 2) {
+ /* The V2 chip version has a bug when unpacking YUV422 */
+ struct mcde_palette_table palette = {
+ .map_col_ch0 = *map_yv,
+ .map_col_ch1 = *map_u,
+ .map_col_ch2 = *map_yv,
+ };
+ ret = mcde_chnl_set_palette(ddev->chnl_state, &palette);
+ } else {
+ ret = mcde_chnl_set_palette(ddev->chnl_state, NULL);
+ }
+
+ return ret;
+}
+
+static int hdmi_apply_config(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (!ddev->update_flags)
+ return 0;
+
+ ret = mcde_chnl_apply(ddev->chnl_state);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to apply to channel\n",
+ __func__);
+ return ret;
+ }
+ ddev->update_flags = 0;
+
+ return 0;
+}
+
+static int hdmi_on_first_update(struct mcde_display_device *dev)
+{
+ int ret;
+ union av8100_configuration av8100_config;
+ u8 *infofr_data;
+ int infofr_crc;
+ int cnt;
+
+ dev->first_update = false;
+
+ /*
+ * Prepare HDMI configuration
+ * Avoid simultaneous output of DENC and HDMI/DVI.
+ * Only one of them should be enabled.
+ * Note HDMI/DVI and DENC are always turned off in set_video_mode.
+ */
+ switch (dev->port->hdmi_sdtv_switch) {
+ case SDTV_SWITCH:
+ if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ av8100_config.denc_format.enable = 1;
+ if (dev->video_mode.yres == NATIVE_YRES_SDTV) {
+ av8100_config.denc_format.standard_selection =
+ AV8100_PAL_BDGHI;
+ av8100_config.denc_format.cvbs_video_format =
+ AV8100_CVBS_625;
+ } else {
+ av8100_config.denc_format.standard_selection =
+ AV8100_NTSC_M;
+ av8100_config.denc_format.cvbs_video_format =
+ AV8100_CVBS_525;
+ }
+ ret = av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config);
+ break;
+ case DVI_SWITCH:
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ av8100_config.hdmi_format.hdmi_format = AV8100_DVI;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config);
+ break;
+ case HDMI_SWITCH:
+ default:
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ av8100_config.hdmi_format.hdmi_format = AV8100_HDMI;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config);
+ break;
+ }
+
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_HDMI/DENC failed\n", __func__);
+ return ret;
+ }
+
+ /* Enable interrupts */
+ ret = av8100_enable_interrupt();
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_enable_interrupt failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH)
+ ret = av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL,
+ I2C_INTERFACE);
+ else
+ ret = av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_HDMI/DENC failed\n", __func__);
+ return ret;
+ }
+
+ /* AVI Infoframe only if HDMI */
+ if (dev->port->hdmi_sdtv_switch != HDMI_SWITCH)
+ goto hdmi_on_first_update_end;
+
+ /* Create AVI Infoframe */
+ av8100_config.infoframes_format.type = AVI_INFOFRAME_TYPE;
+ av8100_config.infoframes_format.version = AVI_INFOFRAME_VERSION;
+ av8100_config.infoframes_format.length = AVI_INFOFRAME_DATA_SIZE;
+
+ /* AVI Infoframe data */
+ infofr_data = &av8100_config.infoframes_format.data[0];
+ memset(infofr_data, 0, AVI_INFOFRAME_DATA_SIZE);
+ infofr_data[0] = AVI_INFOFRAME_DB1;
+ infofr_data[1] = AVI_INFOFRAME_DB2;
+ infofr_data[3] = ceanr_get(dev);
+
+ /* Calculate AVI Infoframe checksum */
+ infofr_crc = av8100_config.infoframes_format.type +
+ av8100_config.infoframes_format.version +
+ av8100_config.infoframes_format.length;
+ for (cnt = 0; cnt < AVI_INFOFRAME_DATA_SIZE; cnt++)
+ infofr_crc += infofr_data[cnt];
+ infofr_crc &= 0xFF;
+ av8100_config.infoframes_format.crc = 0x100 - infofr_crc;
+
+ /* Send AVI Infoframe */
+ if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES,
+ &av8100_config) != 0) {
+ dev_err(&dev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(&dev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+hdmi_on_first_update_end:
+ return ret;
+}
+
+static int hdmi_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ int ret = 0;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The regulator for analog TV-out is only enabled here, which
+ * means that one needs to go through the OFF state to be able
+ * to switch from HDMI to CVBS.
+ */
+ if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ ret = regulator_enable(driver_data->cvbs_regulator);
+ if (ret)
+ return ret;
+ driver_data->cvbs_regulator_enabled = true;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+
+ hdmi_set_port_pixel_format(ddev);
+ }
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ goto set_power_and_exit;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode));
+ ret = av8100_powerscan();
+ if (ret)
+ dev_err(&ddev->dev, "%s:av8100_powerscan failed\n"
+ , __func__);
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ if (driver_data->cvbs_regulator_enabled) {
+ ret = regulator_disable(driver_data->cvbs_regulator);
+ if (ret)
+ return ret;
+ driver_data->cvbs_regulator_enabled = false;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+set_power_and_exit:
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static int __devinit hdmi_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ struct display_driver_data *driver_data;
+ struct mcde_display_hdmi_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev->port->type != MCDE_PORTTYPE_DSI) {
+ dev_err(&dev->dev, "%s:Invalid port type %d\n",
+ __func__, dev->port->type);
+ return -EINVAL;
+ }
+
+ driver_data = (struct display_driver_data *)
+ kzalloc(sizeof(struct display_driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(&dev->dev, "Failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ /* DSI uses clock continuous mode if the chip version is later than AV8100_CHIPVER_1 */
+ if (av8100_ver_get() > AV8100_CHIPVER_1)
+ dev->port->phy.dsi.clk_cont = true;
+
+ dev->on_first_update = hdmi_on_first_update;
+ dev->try_video_mode = hdmi_try_video_mode;
+ dev->set_video_mode = hdmi_set_video_mode;
+ dev->apply_config = hdmi_apply_config;
+ dev->set_pixel_format = hdmi_set_pixel_format;
+ dev->set_power_mode = hdmi_set_power_mode;
+ dev->ceanr_convert = ceanr_convert;
+
+ /* Create sysfs files */
+ if (device_create_file(&dev->dev, &dev_attr_hdmisdtvswitch))
+ dev_info(&dev->dev,
+ "Unable to create hdmisdtvswitch attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_input_pixel_format))
+ dev_info(&dev->dev,
+ "Unable to create input_pixel_format attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_disponoff))
+ dev_info(&dev->dev,
+ "Unable to create disponoff attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_vesacea))
+ dev_info(&dev->dev,
+ "Unable to create ceavesa attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_timing))
+ dev_info(&dev->dev,
+ "Unable to create timing attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_stayalive))
+ dev_info(&dev->dev,
+ "Unable to create stayalive attr\n");
+
+ if (pdata->cvbs_regulator_id) {
+ driver_data->cvbs_regulator = regulator_get(&dev->dev,
+ pdata->cvbs_regulator_id);
+ if (IS_ERR(driver_data->cvbs_regulator)) {
+ ret = PTR_ERR(driver_data->cvbs_regulator);
+ dev_warn(&dev->dev, "%s:Failed to get regulator %s\n",
+ __func__, pdata->cvbs_regulator_id);
+ driver_data->cvbs_regulator = NULL;
+ goto av_regulator_get_failed;
+ }
+ }
+
+ dev_set_drvdata(&dev->dev, driver_data);
+ dev_info(&dev->dev, "HDMI display probed\n");
+
+ return 0;
+
+av_regulator_get_failed:
+ kfree(driver_data);
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct mcde_display_device *dev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&dev->dev);
+ struct mcde_display_hdmi_platform_data *pdata =
+ dev->dev.platform_data;
+
+ /* Remove sysfs files */
+ device_remove_file(&dev->dev, &dev_attr_input_pixel_format);
+ device_remove_file(&dev->dev, &dev_attr_hdmisdtvswitch);
+ device_remove_file(&dev->dev, &dev_attr_disponoff);
+ device_remove_file(&dev->dev, &dev_attr_vesacea);
+ device_remove_file(&dev->dev, &dev_attr_timing);
+ device_remove_file(&dev->dev, &dev_attr_stayalive);
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ if (driver_data->cvbs_regulator)
+ regulator_put(driver_data->cvbs_regulator);
+ kfree(driver_data);
+ if (pdata->hdmi_platform_enable) {
+ if (pdata->regulator)
+ regulator_put(pdata->regulator);
+ if (pdata->reset_gpio) {
+ gpio_direction_input(pdata->reset_gpio);
+ gpio_free(pdata->reset_gpio);
+ }
+ }
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int hdmi_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (ddev->chnl_state == NULL)
+ return 0;
+
+ /* set_power_mode will handle the platform_enable call */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+
+ return ret;
+}
+
+static int hdmi_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret;
+
+ if (ddev->chnl_state == NULL)
+ return 0;
+
+ /* set_power_mode will handle the platform_disable call */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = hdmi_suspend,
+ .resume = hdmi_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "av8100_hdmi",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_hdmi_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&hdmi_driver);
+
+}
+late_initcall(mcde_display_hdmi_init);
+
+static void __exit mcde_display_hdmi_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&hdmi_driver);
+}
+module_exit(mcde_display_hdmi_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson hdmi display driver");
diff --git a/drivers/video/mcde/display-fictive.c b/drivers/video/mcde/display-fictive.c
new file mode 100644
index 00000000000..c7ea1429b9f
--- /dev/null
+++ b/drivers/video/mcde/display-fictive.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * ST-Ericsson MCDE fictive display driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+
+static int __devinit fictive_probe(struct mcde_display_device *dev)
+{
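+ /* The fictive display has no real panel to control, so all power hooks are cleared */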
+ dev->platform_enable = NULL;
+ dev->platform_disable = NULL;
+ dev->set_power_mode = NULL;
+
+ dev_info(&dev->dev, "Fictive display probed\n");
+
+ return 0;
+}
+
+static int __devexit fictive_remove(struct mcde_display_device *dev)
+{
+ return 0;
+}
+
+static struct mcde_display_driver fictive_driver = {
+ .probe = fictive_probe,
+ .remove = fictive_remove,
+ .driver = {
+ .name = "mcde_disp_fictive",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_fictive_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&fictive_driver);
+}
+module_init(mcde_display_fictive_init);
+
+static void __exit mcde_display_fictive_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&fictive_driver);
+}
+module_exit(mcde_display_fictive_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE fictive display driver");
diff --git a/drivers/video/mcde/display-generic_dsi.c b/drivers/video/mcde/display-generic_dsi.c
new file mode 100644
index 00000000000..6c63f4cd750
--- /dev/null
+++ b/drivers/video/mcde/display-generic_dsi.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE generic DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-generic_dsi.h>
+
+static int generic_platform_enable(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev_dbg(&dev->dev, "%s: Reset & power on generic display\n", __func__);
+
+ if (pdata->regulator) {
+ if (regulator_enable(pdata->regulator) < 0) {
+ dev_err(&dev->dev, "%s:Failed to enable regulator\n"
+ , __func__);
+ return -EINVAL;
+ }
+ }
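+ /* Pulse the reset line at its active level for reset_delay ms */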
+ if (pdata->reset_gpio)
+ gpio_set_value_cansleep(pdata->reset_gpio, pdata->reset_high);
+ mdelay(pdata->reset_delay);
+ if (pdata->reset_gpio)
+ gpio_set_value_cansleep(pdata->reset_gpio, !pdata->reset_high);
+
+ return 0;
+}
+
+static int generic_platform_disable(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev_dbg(&dev->dev, "%s:Reset & power off generic display\n", __func__);
+
+ if (pdata->regulator) {
+ if (regulator_disable(pdata->regulator) < 0) {
+ dev_err(&dev->dev, "%s:Failed to disable regulator\n"
+ , __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int generic_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ struct mcde_display_generic_platform_data *pdata =
+ ddev->dev.platform_data;
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ msleep(pdata->sleep_out_delay);
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_ON, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ goto set_power_and_exit;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_OFF, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+set_power_and_exit:
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static int __devinit generic_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev->port->type != MCDE_PORTTYPE_DSI) {
+ dev_err(&dev->dev,
+ "%s:Invalid port type %d\n",
+ __func__, dev->port->type);
+ return -EINVAL;
+ }
+
+ if (!dev->platform_enable && !dev->platform_disable) {
+ pdata->generic_platform_enable = true;
+ if (pdata->reset_gpio) {
+ ret = gpio_request(pdata->reset_gpio, NULL);
+ if (ret) {
+ dev_warn(&dev->dev,
+ "%s:Failed to request gpio %d\n",
+ __func__, pdata->reset_gpio);
+ goto gpio_request_failed;
+ }
+ gpio_direction_output(pdata->reset_gpio,
+ !pdata->reset_high);
+ }
+ if (pdata->regulator_id) {
+ pdata->regulator = regulator_get(&dev->dev,
+ pdata->regulator_id);
+ if (IS_ERR(pdata->regulator)) {
+ ret = PTR_ERR(pdata->regulator);
+ dev_warn(&dev->dev,
+ "%s:Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_id);
+ pdata->regulator = NULL;
+ goto regulator_get_failed;
+ }
+
+ if (regulator_set_voltage(pdata->regulator,
+ pdata->min_supply_voltage,
+ pdata->max_supply_voltage) < 0) {
+ int volt;
+
+ dev_warn(&dev->dev,
+ "%s:Failed to set voltage '%s'\n",
+ __func__, pdata->regulator_id);
+ volt = regulator_get_voltage(pdata->regulator);
+ dev_warn(&dev->dev,
+ "Voltage:%d\n", volt);
+ }
+
+ /*
+ * When u-boot has displayed a startup screen, it has already
+ * turned on the display power, but the regulator framework
+ * does not know about that. In that case the display driver
+ * has to enable the regulator for the display itself.
+ */
+ if (dev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ ret = regulator_enable(pdata->regulator);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "%s:Failed to enable regulator\n"
+ , __func__);
+ goto regulator_enable_failed;
+ }
+ }
+ }
+ }
+
+ dev->platform_enable = generic_platform_enable;
+ dev->platform_disable = generic_platform_disable;
+ dev->set_power_mode = generic_set_power_mode;
+
+ dev_info(&dev->dev, "Generic display probed\n");
+
+ goto out;
+regulator_enable_failed:
+regulator_get_failed:
+ if (pdata->generic_platform_enable && pdata->reset_gpio)
+ gpio_free(pdata->reset_gpio);
+gpio_request_failed:
+out:
+ return ret;
+}
+
+static int __devexit generic_remove(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ if (!pdata->generic_platform_enable)
+ return 0;
+
+ if (pdata->regulator)
+ regulator_put(pdata->regulator);
+ if (pdata->reset_gpio) {
+ gpio_direction_input(pdata->reset_gpio);
+ gpio_free(pdata->reset_gpio);
+ }
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int generic_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ /* set_power_mode will handle the platform_enable call */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ ddev->set_synchronized_update(ddev,
+ ddev->get_synchronized_update(ddev));
+ return ret;
+}
+
+static int generic_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret;
+
+ /* set_power_mode will handle the platform_disable call */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver generic_driver = {
+ .probe = generic_probe,
+ .remove = generic_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = generic_suspend,
+ .resume = generic_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde_disp_generic",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_generic_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&generic_driver);
+}
+module_init(mcde_display_generic_init);
+
+static void __exit mcde_display_generic_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&generic_driver);
+}
+module_exit(mcde_display_generic_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE generic DCS display driver");
diff --git a/drivers/video/mcde/display-samsung_s6d16d0.c b/drivers/video/mcde/display-samsung_s6d16d0.c
new file mode 100644
index 00000000000..063c8996763
--- /dev/null
+++ b/drivers/video/mcde/display-samsung_s6d16d0.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Samsung S6D16D0 display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+
+#define RESET_DURATION_US 10
+#define RESET_DELAY_MS 120
+#define SLEEP_OUT_DELAY_MS 120
+#define IO_REGU "vdd1"
+#define IO_REGU_MIN 1650000
+#define IO_REGU_MAX 3300000
+
+struct device_info {
+ int reset_gpio;
+ struct mcde_port port;
+ struct regulator *regulator;
+};
+
+static inline struct device_info *get_drvdata(struct mcde_display_device *ddev)
+{
+ return (struct device_info *)dev_get_drvdata(&ddev->dev);
+}
+
+static int power_on(struct mcde_display_device *ddev)
+{
+ struct device_info *di = get_drvdata(ddev);
+
+ dev_dbg(&ddev->dev, "Reset & power on s6d16d0 display\n");
+
+ regulator_enable(di->regulator);
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ udelay(RESET_DURATION_US);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+
+ return 0;
+}
+
+static int power_off(struct mcde_display_device *ddev)
+{
+ struct device_info *di = get_drvdata(ddev);
+
+ dev_dbg(&ddev->dev, "Power off s6d16d0 display\n");
+
+ regulator_disable(di->regulator);
+
+ return 0;
+}
+
+static int display_on(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display on s6d16d0\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE,
+ NULL, 0);
+ if (ret)
+ return ret;
+ msleep(SLEEP_OUT_DELAY_MS);
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON,
+ NULL, 0);
+}
+
+static int display_off(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display off s6d16d0\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE,
+ NULL, 0);
+}
+
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ dev_dbg(&ddev->dev, "Set power mode %d\n", power_mode);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ ret = power_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = display_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+
+ ret = display_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ ret = power_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+}
+
+static int __devinit samsung_s6d16d0_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_display_dsi_platform_data *pdata = ddev->dev.platform_data;
+ struct device_info *di;
+
+ if (pdata == NULL || !pdata->reset_gpio) {
+ dev_err(&ddev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+ di->reset_gpio = pdata->reset_gpio;
+ di->port.link = pdata->link;
+ di->port.type = MCDE_PORTTYPE_DSI;
+ di->port.mode = MCDE_PORTMODE_CMD;
+ di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP;
+ di->port.sync_src = MCDE_SYNCSRC_BTA;
+ di->port.phy.dsi.num_data_lanes = 2;
+ /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */
+ di->port.phy.dsi.ui = 9;
+
+ ret = gpio_request(di->reset_gpio, NULL);
+ if (ret)
+ goto gpio_request_failed;
+ gpio_direction_output(di->reset_gpio, 1);
+ di->regulator = regulator_get(&ddev->dev, IO_REGU);
+ if (IS_ERR(di->regulator)) {
+ di->regulator = NULL;
+ goto regulator_get_failed;
+ }
+ ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX);
+ if (WARN_ON(ret))
+ goto regulator_voltage_failed;
+
+ /* Get in sync with u-boot */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY)
+ (void)regulator_enable(di->regulator);
+
+ ddev->set_power_mode = set_power_mode;
+ ddev->port = &di->port;
+ ddev->native_x_res = 864;
+ ddev->native_y_res = 480;
+ dev_set_drvdata(&ddev->dev, di);
+
+ dev_info(&ddev->dev, "Samsung s6d16d0 display probed\n");
+
+ return 0;
+regulator_voltage_failed:
+ regulator_put(di->regulator);
+regulator_get_failed:
+ gpio_free(di->reset_gpio);
+gpio_request_failed:
+ kfree(di);
+ return ret;
+}
+
+static struct mcde_display_driver samsung_s6d16d0_driver = {
+ .probe = samsung_s6d16d0_probe,
+ .driver = {
+ .name = "samsung_s6d16d0",
+ },
+};
+
+static int __init samsung_s6d16d0_init(void)
+{
+ return mcde_display_driver_register(&samsung_s6d16d0_driver);
+}
+module_init(samsung_s6d16d0_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE Samsung S6D16D0 display driver");
diff --git a/drivers/video/mcde/display-sony_acx424akp_dsi.c b/drivers/video/mcde/display-sony_acx424akp_dsi.c
new file mode 100644
index 00000000000..0b5b69bf23e
--- /dev/null
+++ b/drivers/video/mcde/display-sony_acx424akp_dsi.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Sony acx424akp DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+
+#define RESET_DELAY_MS 11
+#define RESET_LOW_DELAY_US 20
+#define SLEEP_OUT_DELAY_MS 140
+#define IO_REGU "vddi"
+#define IO_REGU_MIN 1600000
+#define IO_REGU_MAX 3300000
+
+struct device_info {
+ int reset_gpio;
+ struct mcde_port port;
+ struct regulator *regulator;
+};
+
+static inline struct device_info *get_drvdata(struct mcde_display_device *ddev)
+{
+ return (struct device_info *)dev_get_drvdata(&ddev->dev);
+}
+
+static int display_read_deviceid(struct mcde_display_device *dev, u16 *id)
+{
+ struct mcde_chnl_state *chnl;
+
+ u8 id1, id2, id3;
+ int len = 1;
+ int ret = 0;
+ int readret = 0;
+
+ dev_dbg(&dev->dev, "%s: Read device id of the display\n", __func__);
+
+ /* Acquire MCDE resources */
+ chnl = mcde_chnl_get(dev->chnl_id, dev->fifo, dev->port);
+ if (IS_ERR(chnl)) {
+ ret = PTR_ERR(chnl);
+ dev_warn(&dev->dev, "Failed to acquire MCDE channel\n");
+ goto out;
+ }
+
+ /* Plug and play: use registers DAh, DBh and DCh to detect the display */
+ readret = mcde_dsi_dcs_read(chnl, 0xDA, (u32 *)&id1, &len);
+ if (!readret)
+ readret = mcde_dsi_dcs_read(chnl, 0xDB, (u32 *)&id2, &len);
+ if (!readret)
+ readret = mcde_dsi_dcs_read(chnl, 0xDC, (u32 *)&id3, &len);
+
+ if (readret) {
+ dev_info(&dev->dev,
+ "mcde_dsi_dcs_read failed to read display ID\n");
+ goto read_fail;
+ }
+
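+ /* The device ID is formed from ID2 (low byte) and ID3 (high byte); id1 is read but not used */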
+ *id = (id3 << 8) | id2;
+read_fail:
+ /* close MCDE channel */
+ mcde_chnl_put(chnl);
+out:
+ return ret;
+}
+
+static int power_on(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s: Reset & power on sony display\n", __func__);
+
+ regulator_enable(di->regulator);
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ udelay(RESET_LOW_DELAY_US);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+
+ return 0;
+}
+
+static int power_off(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s:Reset & power off sony display\n", __func__);
+
+ regulator_disable(di->regulator);
+
+ return 0;
+}
+
+static int display_on(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display on sony display\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE,
+ NULL, 0);
+ if (ret)
+ return ret;
+ msleep(SLEEP_OUT_DELAY_MS);
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON,
+ NULL, 0);
+}
+
+static int display_off(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display off sony display\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE,
+ NULL, 0);
+}
+
+static int sony_acx424akp_set_scan_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ u8 param[MCDE_MAX_DSI_DIRECT_CMD_WRITE];
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+ /* 180 rotation for SONY ACX424AKP display */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ param[0] = 0xAA;
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, 0xf3, param, 1);
+ if (ret)
+ return ret;
+
+ param[0] = 0x00;
+ param[1] = 0x00;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xC9;
+ param[1] = 0x01;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xA2;
+ param[1] = 0x00;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xFF;
+ param[1] = 0xAA;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int sony_acx424akp_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ ret = power_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = display_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+
+ ret = display_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ ret = power_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+ return sony_acx424akp_set_scan_mode(ddev, power_mode);
+}
+
+static int __devinit sony_acx424akp_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ u16 id = 0;
+ struct device_info *di;
+ struct mcde_port *port;
+ struct mcde_display_sony_acx424akp_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL || !pdata->reset_gpio) {
+ dev_err(&dev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ port = dev->port;
+ di->reset_gpio = pdata->reset_gpio;
+ di->port.type = MCDE_PORTTYPE_DSI;
+ di->port.mode = MCDE_PORTMODE_CMD;
+ di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP;
+ di->port.sync_src = MCDE_SYNCSRC_BTA;
+ di->port.phy.dsi.num_data_lanes = 2;
+ di->port.link = port->link;
+ /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */
+ di->port.phy.dsi.ui = 9;
+
+ ret = gpio_request(di->reset_gpio, NULL);
+ if (WARN_ON(ret))
+ goto gpio_request_failed;
+
+ gpio_direction_output(di->reset_gpio, 1);
+ di->regulator = regulator_get(&dev->dev, IO_REGU);
+ if (IS_ERR(di->regulator)) {
+ ret = PTR_ERR(di->regulator);
+ di->regulator = NULL;
+ goto regulator_get_failed;
+ }
+ ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX);
+ if (WARN_ON(ret))
+ goto regulator_voltage_failed;
+
+ dev->set_power_mode = sony_acx424akp_set_power_mode;
+
+ dev->port = &di->port;
+ dev->native_x_res = 480;
+ dev->native_y_res = 854;
+ dev_set_drvdata(&dev->dev, di);
+
+ /*
+ * When u-boot has displayed a startup screen, it has already
+ * turned on the display power, but the regulator framework
+ * does not know about that. In that case the display driver
+ * has to enable the regulator for the display itself.
+ */
+ if (dev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ (void) regulator_enable(di->regulator);
+ } else if (dev->power_mode == MCDE_DISPLAY_PM_OFF) {
+ power_on(dev);
+ dev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ ret = display_read_deviceid(dev, &id);
+ if (ret)
+ goto read_id_failed;
+
+ switch (id) {
+ case DISPLAY_SONY_ACX424AKP:
+ pdata->disp_panel = DISPLAY_SONY_ACX424AKP;
+ dev_info(&dev->dev,
+ "Sony ACX424AKP display (ID 0x%.4X) probed\n", id);
+ break;
+ default:
+ pdata->disp_panel = DISPLAY_NONE;
+ dev_info(&dev->dev,
+ "Display not recognized (ID 0x%.4X) probed\n", id);
+ goto read_id_failed;
+ }
+
+ return 0;
+
+read_id_failed:
+regulator_voltage_failed:
+ regulator_put(di->regulator);
+regulator_get_failed:
+ gpio_free(di->reset_gpio);
+gpio_request_failed:
+ kfree(di);
+ return ret;
+}
+
+static int __devexit sony_acx424akp_remove(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ regulator_put(di->regulator);
+ gpio_direction_input(di->reset_gpio);
+ gpio_free(di->reset_gpio);
+
+ kfree(di);
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int sony_acx424akp_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ /* set_power_mode will handle the platform_enable call */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ ddev->set_synchronized_update(ddev,
+ ddev->get_synchronized_update(ddev));
+ return ret;
+}
+
+static int sony_acx424akp_suspend(struct mcde_display_device *ddev,
+ pm_message_t state)
+{
+ int ret;
+
+ /* set_power_mode will handle the platform_disable call */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver sony_acx424akp_driver = {
+ .probe = sony_acx424akp_probe,
+ .remove = sony_acx424akp_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = sony_acx424akp_suspend,
+ .resume = sony_acx424akp_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde_disp_sony_acx424akp",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_sony_acx424akp_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&sony_acx424akp_driver);
+}
+module_init(mcde_display_sony_acx424akp_init);
+
+static void __exit mcde_display_sony_acx424akp_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&sony_acx424akp_driver);
+}
+module_exit(mcde_display_sony_acx424akp_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE Sony ACX424AKP DCS display driver");
diff --git a/drivers/video/mcde/display-vuib500-dpi.c b/drivers/video/mcde/display-vuib500-dpi.c
new file mode 100644
index 00000000000..2bd5b990608
--- /dev/null
+++ b/drivers/video/mcde/display-vuib500-dpi.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE DPI display driver
+ * The VUIB500 is a user interface board that can be attached to an HREF. It
+ * supports the DPI pixel interface and converts this to an analog VGA signal,
+ * which can be connected to a monitor using a DSUB connector. The VUIB board
+ * uses an external power supply of 5V.
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-vuib500-dpi.h>
+
+#define DPI_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__)
+
+static int try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+
+static int __devinit dpi_display_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_display_dpi_platform_data *pdata = ddev->dev.platform_data;
+ DPI_DISP_TRACE;
+
+ if (pdata == NULL) {
+ dev_err(&ddev->dev, "%s:Platform data missing\n", __func__);
+ ret = -EINVAL;
+ goto no_pdata;
+ }
+
+ if (ddev->port->type != MCDE_PORTTYPE_DPI) {
+ dev_err(&ddev->dev,
+ "%s:Invalid port type %d\n",
+ __func__, ddev->port->type);
+ ret = -EINVAL;
+ goto invalid_port_type;
+ }
+
+ ddev->try_video_mode = try_video_mode;
+ ddev->set_video_mode = set_video_mode;
+ dev_info(&ddev->dev, "DPI display probed\n");
+
+ goto out;
+invalid_port_type:
+no_pdata:
+out:
+ return ret;
+}
+
+static int __devexit dpi_display_remove(struct mcde_display_device *ddev)
+{
+ DPI_DISP_TRACE;
+
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ return 0;
+}
+
+static int dpi_display_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+ DPI_DISP_TRACE;
+
+	/* set_power_mode will handle the call to platform_enable */
+	ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+	if (ret < 0)
+		dev_warn(&ddev->dev, "%s:Failed to resume display\n",
+			__func__);
+ return ret;
+}
+
+static int dpi_display_suspend(struct mcde_display_device *ddev,
+ pm_message_t state)
+{
+ int ret;
+ DPI_DISP_TRACE;
+
+	/* set_power_mode will handle the call to platform_disable */
+	ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+	if (ret < 0)
+		dev_warn(&ddev->dev, "%s:Failed to suspend display\n",
+			__func__);
+ return ret;
+}
+
+static void print_vmode(struct mcde_video_mode *vmode)
+{
+ pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres);
+ pr_debug(" pixclock: %d\n", vmode->pixclock);
+ pr_debug(" hbp: %d\n", vmode->hbp);
+ pr_debug(" hfp: %d\n", vmode->hfp);
+ pr_debug(" hsw: %d\n", vmode->hsw);
+ pr_debug(" vbp: %d\n", vmode->vbp);
+ pr_debug(" vfp: %d\n", vmode->vfp);
+ pr_debug(" vsw: %d\n", vmode->vsw);
+ pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false");
+}
+
+/* Taken from the programmed value of the LCD clock in PRCMU */
+#define PIX_CLK_FREQ 25000000
+#define VMODE_XRES 640
+#define VMODE_YRES 480
+
+static int try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res = -EINVAL;
+ DPI_DISP_TRACE;
+
+ if (ddev == NULL || video_mode == NULL) {
+		pr_warning("%s: ddev = NULL or video_mode = NULL\n", __func__);
+ return res;
+ }
+
+ if (video_mode->xres == VMODE_XRES && video_mode->yres == VMODE_YRES) {
+ video_mode->hbp = 40;
+ video_mode->hfp = 8;
+ video_mode->hsw = 96;
+ video_mode->vbp = 25;
+ video_mode->vfp = 2;
+ video_mode->vsw = 2;
+ /*
+ * The pixclock setting is not used within MCDE. The clock is
+		 * set up elsewhere, but the pixclock value is visible in user
+ * space.
+ */
+ video_mode->pixclock = (int) (1e+12 * (1.0 / PIX_CLK_FREQ));
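+		/* e.g. 10^12 / 25000000 = 40000 ps for the 25 MHz clock */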
+ res = 0;
+ } /* TODO: add more supported resolutions here */
+ video_mode->interlaced = false;
+
+ if (res == 0)
+ print_vmode(video_mode);
+ else
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+
+ return res;
+}
+
+static int set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res;
+ DPI_DISP_TRACE;
+
+ if (ddev == NULL || video_mode == NULL) {
+		pr_warning("%s: ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (video_mode->xres != VMODE_XRES || video_mode->yres != VMODE_YRES) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+ ddev->video_mode = *video_mode;
+ print_vmode(video_mode);
+
+ res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode);
+	if (res < 0)
+		dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n",
+			__func__);
+ /* notify mcde display driver about updated video mode */
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+ return res;
+}
+
+static struct mcde_display_driver dpi_display_driver = {
+ .probe = dpi_display_probe,
+ .remove = dpi_display_remove,
+ .suspend = dpi_display_suspend,
+ .resume = dpi_display_resume,
+ .driver = {
+ .name = "mcde_display_dpi",
+ },
+};
+
+/* Module init */
+static int __init mcde_dpi_display_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&dpi_display_driver);
+}
+module_init(mcde_dpi_display_init);
+
+static void __exit mcde_dpi_display_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&dpi_display_driver);
+}
+module_exit(mcde_dpi_display_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE DPI display driver for VUIB500 display");
diff --git a/drivers/video/mcde/dsilink_regs.h b/drivers/video/mcde/dsilink_regs.h
new file mode 100644
index 00000000000..29fa75a1752
--- /dev/null
+++ b/drivers/video/mcde/dsilink_regs.h
@@ -0,0 +1,2037 @@
+
+#define DSI_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define DSI_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
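+/*
+ * The per-register field macros below are all built on DSI_VAL2REG and
+ * DSI_REG2VAL. For example, a DSI_MCTL_MAIN_PHY_CTL value with the second
+ * data lane enabled and a burst wait time of 4 can be composed and decoded
+ * like this:
+ *
+ *	u32 val = DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(1) |
+ *		DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(4);
+ *	u32 wait = DSI_REG2VAL(DSI_MCTL_MAIN_PHY_CTL, WAIT_BURST_TIME, val);
+ */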
+#define DSI_MCTL_INTEGRATION_MODE 0x00000000
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_SHIFT 0
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_MASK 0x00000001
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_INTEGRATION_MODE, INT_MODE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_SHIFT 0
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, LINK_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_SHIFT 1
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_MASK 0x00000002
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_CMD 0
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_VID 1
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, \
+ DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_##__x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_SHIFT 2
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_MASK 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, VID_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_SHIFT 3
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_MASK 0x00000008
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_SHIFT 4
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_MASK 0x00000010
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TBG_SEL, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_SHIFT 5
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_SHIFT 6
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_MASK 0x00000040
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_SHIFT 7
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_MASK 0x00000080
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_SHIFT 8
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_MASK 0x00000100
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, READ_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_SHIFT 9
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_MASK 0x00000200
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, BTA_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_SHIFT 10
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_MASK 0x00000400
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_ECC, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_SHIFT 11
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_MASK 0x00000800
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_CHECKSUM, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_SHIFT 12
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_MASK 0x00001000
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_SHIFT 13
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_MASK 0x00002000
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_EOT_GEN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_SHIFT 14
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_MASK 0x00004000
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_SHIFT 15
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_MASK 0x00008000
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_SHIFT 0
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, LANE2_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_SHIFT 1
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_MASK 0x00000002
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_SHIFT 2
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_MASK 0x00000004
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_SHIFT 3
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_SHIFT 4
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT1_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_SHIFT 5
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT2_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT 6
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_MASK 0x000003C0
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, WAIT_BURST_TIME, __x)
+#define DSI_MCTL_PLL_CTL 0x0000000C
+#define DSI_MCTL_PLL_CTL_PLL_MULT_SHIFT 0
+#define DSI_MCTL_PLL_CTL_PLL_MULT_MASK 0x000000FF
+#define DSI_MCTL_PLL_CTL_PLL_MULT(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MULT, __x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_SHIFT 8
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_MASK 0x00003F00
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_DIV, __x)
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_SHIFT 14
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_MASK 0x0001C000
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_IN_DIV, __x)
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_SHIFT 17
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_MASK 0x00020000
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_SEL_DIV2, __x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SHIFT 18
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_MASK 0x00040000
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_INT_PLL 0
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SYS_PLL 1
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, \
+ DSI_MCTL_PLL_CTL_PLL_OUT_SEL_##__x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, __x)
+#define DSI_MCTL_PLL_CTL_PLL_MASTER_SHIFT 31
+#define DSI_MCTL_PLL_CTL_PLL_MASTER_MASK 0x80000000
+#define DSI_MCTL_PLL_CTL_PLL_MASTER(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MASTER, __x)
+#define DSI_MCTL_LANE_STS 0x00000010
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_SHIFT 0
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_MASK 0x00000003
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_START 0
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_HS 2
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, \
+ DSI_MCTL_LANE_STS_CLKLANE_STATE_##__x)
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, __x)
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_SHIFT 2
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_MASK 0x0000001C
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_START 0
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_WRITE 2
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_READ 4
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, \
+ DSI_MCTL_LANE_STS_DATLANE1_STATE_##__x)
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, __x)
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_SHIFT 5
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_MASK 0x00000060
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_START 0
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_WRITE 2
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, \
+ DSI_MCTL_LANE_STS_DATLANE2_STATE_##__x)
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, __x)
+#define DSI_MCTL_DPHY_TIMEOUT 0x00000014
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT 0
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_MASK 0x0000000F
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, CLK_DIV, __x)
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT 4
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_MASK 0x0003FFF0
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, HSTX_TO_VAL, __x)
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT 18
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_MASK 0xFFFC0000
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, LPRX_TO_VAL, __x)
+#define DSI_MCTL_ULPOUT_TIME 0x00000018
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT 0
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_MASK 0x000001FF
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, CKLANE_ULPOUT_TIME, __x)
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT 9
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_MASK 0x0003FE00
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, DATA_ULPOUT_TIME, __x)
+#define DSI_MCTL_DPHY_STATIC 0x0000001C
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_SHIFT 0
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_MASK 0x00000001
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_CLK, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_SHIFT 1
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_MASK 0x00000002
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_CLK, __x)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_SHIFT 2
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_MASK 0x00000004
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT1, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_SHIFT 3
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_MASK 0x00000008
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT1, __x)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_SHIFT 4
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_MASK 0x00000010
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT2, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_SHIFT 5
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_MASK 0x00000020
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT2, __x)
+#define DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT 6
+#define DSI_MCTL_DPHY_STATIC_UI_X4_MASK 0x00000FC0
+#define DSI_MCTL_DPHY_STATIC_UI_X4(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, UI_X4, __x)
+#define DSI_MCTL_MAIN_EN 0x00000020
+#define DSI_MCTL_MAIN_EN_PLL_START_SHIFT 0
+#define DSI_MCTL_MAIN_EN_PLL_START_MASK 0x00000001
+#define DSI_MCTL_MAIN_EN_PLL_START(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, PLL_START, __x)
+#define DSI_MCTL_MAIN_EN_CKLANE_EN_SHIFT 3
+#define DSI_MCTL_MAIN_EN_CKLANE_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_EN_CKLANE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, CKLANE_EN, __x)
+#define DSI_MCTL_MAIN_EN_DAT1_EN_SHIFT 4
+#define DSI_MCTL_MAIN_EN_DAT1_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_EN_DAT1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_EN, __x)
+#define DSI_MCTL_MAIN_EN_DAT2_EN_SHIFT 5
+#define DSI_MCTL_MAIN_EN_DAT2_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_EN_DAT2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_EN, __x)
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_SHIFT 6
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_MASK 0x00000040
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_SHIFT 7
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_MASK 0x00000080
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_SHIFT 8
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_MASK 0x00000100
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_IF1_EN_SHIFT 9
+#define DSI_MCTL_MAIN_EN_IF1_EN_MASK 0x00000200
+#define DSI_MCTL_MAIN_EN_IF1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF1_EN, __x)
+#define DSI_MCTL_MAIN_EN_IF2_EN_SHIFT 10
+#define DSI_MCTL_MAIN_EN_IF2_EN_MASK 0x00000400
+#define DSI_MCTL_MAIN_EN_IF2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF2_EN, __x)
+#define DSI_MCTL_MAIN_STS 0x00000024
+#define DSI_MCTL_MAIN_STS_PLL_LOCK_SHIFT 0
+#define DSI_MCTL_MAIN_STS_PLL_LOCK_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_PLL_LOCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, PLL_LOCK, __x)
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, CLKLANE_READY, __x)
+#define DSI_MCTL_MAIN_STS_DAT1_READY_SHIFT 2
+#define DSI_MCTL_MAIN_STS_DAT1_READY_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_DAT1_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT1_READY, __x)
+#define DSI_MCTL_MAIN_STS_DAT2_READY_SHIFT 3
+#define DSI_MCTL_MAIN_STS_DAT2_READY_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_DAT2_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT2_READY, __x)
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_SHIFT 4
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, HSTX_TO_ERR, __x)
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_SHIFT 5
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, LPRX_TO_ERR, __x)
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, CRS_UNTERM_PCK, __x)
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_SHIFT 7
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, VRS_UNTERM_PCK, __x)
+#define DSI_MCTL_DPHY_ERR 0x00000028
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_2, __x)
+#define DSI_INT_VID_RDDATA 0x00000030
+#define DSI_INT_VID_RDDATA_IF_DATA_SHIFT 0
+#define DSI_INT_VID_RDDATA_IF_DATA_MASK 0x0000FFFF
+#define DSI_INT_VID_RDDATA_IF_DATA(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_DATA, __x)
+#define DSI_INT_VID_RDDATA_IF_VALID_SHIFT 16
+#define DSI_INT_VID_RDDATA_IF_VALID_MASK 0x00010000
+#define DSI_INT_VID_RDDATA_IF_VALID(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_VALID, __x)
+#define DSI_INT_VID_RDDATA_IF_START_SHIFT 17
+#define DSI_INT_VID_RDDATA_IF_START_MASK 0x00020000
+#define DSI_INT_VID_RDDATA_IF_START(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_START, __x)
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_SHIFT 18
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_MASK 0x00040000
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_FRAME_SYNC, __x)
+#define DSI_INT_VID_GNT 0x00000034
+#define DSI_INT_VID_GNT_IF_STALL_SHIFT 0
+#define DSI_INT_VID_GNT_IF_STALL_MASK 0x00000001
+#define DSI_INT_VID_GNT_IF_STALL(__x) \
+ DSI_VAL2REG(DSI_INT_VID_GNT, IF_STALL, __x)
+#define DSI_INT_CMD_RDDATA 0x00000038
+#define DSI_INT_CMD_RDDATA_IF_DATA_SHIFT 0
+#define DSI_INT_CMD_RDDATA_IF_DATA_MASK 0x0000FFFF
+#define DSI_INT_CMD_RDDATA_IF_DATA(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_DATA, __x)
+#define DSI_INT_CMD_RDDATA_IF_VALID_SHIFT 16
+#define DSI_INT_CMD_RDDATA_IF_VALID_MASK 0x00010000
+#define DSI_INT_CMD_RDDATA_IF_VALID(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_VALID, __x)
+#define DSI_INT_CMD_RDDATA_IF_START_SHIFT 17
+#define DSI_INT_CMD_RDDATA_IF_START_MASK 0x00020000
+#define DSI_INT_CMD_RDDATA_IF_START(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_START, __x)
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_SHIFT 18
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_MASK 0x00040000
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_FRAME_SYNC, __x)
+#define DSI_INT_CMD_GNT 0x0000003C
+#define DSI_INT_CMD_GNT_IF_STALL_SHIFT 0
+#define DSI_INT_CMD_GNT_IF_STALL_MASK 0x00000001
+#define DSI_INT_CMD_GNT_IF_STALL(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_GNT, IF_STALL, __x)
+#define DSI_INT_INTERRUPT_CTL 0x00000040
+#define DSI_INT_INTERRUPT_CTL_INT_VAL_SHIFT 0
+#define DSI_INT_INTERRUPT_CTL_INT_VAL_MASK 0x00000001
+#define DSI_INT_INTERRUPT_CTL_INT_VAL(__x) \
+ DSI_VAL2REG(DSI_INT_INTERRUPT_CTL, INT_VAL, __x)
+#define DSI_CMD_MODE_CTL 0x00000050
+#define DSI_CMD_MODE_CTL_IF1_ID_SHIFT 0
+#define DSI_CMD_MODE_CTL_IF1_ID_MASK 0x00000003
+#define DSI_CMD_MODE_CTL_IF1_ID(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_ID, __x)
+#define DSI_CMD_MODE_CTL_IF2_ID_SHIFT 2
+#define DSI_CMD_MODE_CTL_IF2_ID_MASK 0x0000000C
+#define DSI_CMD_MODE_CTL_IF2_ID(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_ID, __x)
+#define DSI_CMD_MODE_CTL_IF1_LP_EN_SHIFT 4
+#define DSI_CMD_MODE_CTL_IF1_LP_EN_MASK 0x00000010
+#define DSI_CMD_MODE_CTL_IF1_LP_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_LP_EN, __x)
+#define DSI_CMD_MODE_CTL_IF2_LP_EN_SHIFT 5
+#define DSI_CMD_MODE_CTL_IF2_LP_EN_MASK 0x00000020
+#define DSI_CMD_MODE_CTL_IF2_LP_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_LP_EN, __x)
+#define DSI_CMD_MODE_CTL_ARB_MODE_SHIFT 6
+#define DSI_CMD_MODE_CTL_ARB_MODE_MASK 0x00000040
+#define DSI_CMD_MODE_CTL_ARB_MODE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_MODE, __x)
+#define DSI_CMD_MODE_CTL_ARB_PRI_SHIFT 7
+#define DSI_CMD_MODE_CTL_ARB_PRI_MASK 0x00000080
+#define DSI_CMD_MODE_CTL_ARB_PRI(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_PRI, __x)
+#define DSI_CMD_MODE_CTL_FIL_VALUE_SHIFT 8
+#define DSI_CMD_MODE_CTL_FIL_VALUE_MASK 0x0000FF00
+#define DSI_CMD_MODE_CTL_FIL_VALUE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, FIL_VALUE, __x)
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT 16
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_MASK 0x03FF0000
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, TE_TIMEOUT, __x)
+#define DSI_CMD_MODE_STS 0x00000054
+#define DSI_CMD_MODE_STS_ERR_NO_TE_SHIFT 0
+#define DSI_CMD_MODE_STS_ERR_NO_TE_MASK 0x00000001
+#define DSI_CMD_MODE_STS_ERR_NO_TE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_NO_TE, __x)
+#define DSI_CMD_MODE_STS_ERR_TE_MISS_SHIFT 1
+#define DSI_CMD_MODE_STS_ERR_TE_MISS_MASK 0x00000002
+#define DSI_CMD_MODE_STS_ERR_TE_MISS(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_TE_MISS, __x)
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_SHIFT 2
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_MASK 0x00000004
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI1_UNDERRUN, __x)
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_SHIFT 3
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_MASK 0x00000008
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI2_UNDERRUN, __x)
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_SHIFT 4
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_MASK 0x00000010
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_UNWANTED_RD, __x)
+#define DSI_CMD_MODE_STS_CSM_RUNNING_SHIFT 5
+#define DSI_CMD_MODE_STS_CSM_RUNNING_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CSM_RUNNING(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, CSM_RUNNING, __x)
+#define DSI_DIRECT_CMD_SEND 0x00000060
+#define DSI_DIRECT_CMD_SEND_START_SHIFT 0
+#define DSI_DIRECT_CMD_SEND_START_MASK 0xFFFFFFFF
+#define DSI_DIRECT_CMD_SEND_START(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_SEND, START, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS 0x00000064
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_SHIFT 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_MASK 0x00000007
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ 1
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ 4
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TRIG_REQ 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_BTA_REQ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, \
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_##__x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_SHIFT 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_MASK 0x00000008
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LONGNOTSHORT, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, \
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_##__x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_MASK 0x0000C000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_ID, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_MASK 0x001F0000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_SIZE, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_SHIFT 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_MASK 0x00200000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LP_EN, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_SHIFT 24
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_MASK 0x0F000000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, TRIGGER_VAL, __x)
+#define DSI_DIRECT_CMD_STS 0x00000068
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, CMD_TRANSMISSION, __x)
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_SHIFT 1
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, WRITE_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_SHIFT 2
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_SHIFT 3
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_SHIFT 4
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_SHIFT 5
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_WITH_ERR_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_SHIFT 6
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED_SHIFT 7
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TE_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_SHIFT 8
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED_SHIFT 9
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_FINISHED, __x)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_SHIFT 10
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED_WITH_ERR, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_SHIFT 11
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_MASK 0x00007800
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_VAL, __x)
+#define DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT 16
+#define DSI_DIRECT_CMD_STS_ACK_VAL_MASK 0xFFFF0000
+#define DSI_DIRECT_CMD_STS_ACK_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACK_VAL, __x)
+#define DSI_DIRECT_CMD_RD_INIT 0x0000006C
+#define DSI_DIRECT_CMD_RD_INIT_RESET_SHIFT 0
+#define DSI_DIRECT_CMD_RD_INIT_RESET_MASK 0xFFFFFFFF
+#define DSI_DIRECT_CMD_RD_INIT_RESET(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_INIT, RESET, __x)
+#define DSI_DIRECT_CMD_WRDAT0 0x00000070
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT0, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT1, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT2, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT3, __x)
+#define DSI_DIRECT_CMD_WRDAT1 0x00000074
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT4, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT5, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT6, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT7, __x)
+#define DSI_DIRECT_CMD_WRDAT2 0x00000078
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT8, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT9, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT10, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT11, __x)
+#define DSI_DIRECT_CMD_WRDAT3 0x0000007C
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT12, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT13, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT14, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT15, __x)
+#define DSI_DIRECT_CMD_RDDAT 0x00000080
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0_SHIFT 0
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0_MASK 0x000000FF
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT0, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1_SHIFT 8
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT1, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2_SHIFT 16
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT2, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3_SHIFT 24
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3_MASK 0xFF000000
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT3, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY 0x00000084
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_SHIFT 0
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK 0x0000FFFF
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_SHIFT 16
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_MASK 0x00030000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_ID, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_SHIFT 18
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_MASK 0x00040000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_DCSNOTGENERIC, __x)
+#define DSI_DIRECT_CMD_RD_STS 0x00000088
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_FIXED, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNCORRECTABLE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_CHECKSUM, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNDECODABLE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_RECEIVE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_OVERSIZE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_WRONG_LENGTH, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_MISSING_EOT, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_EOT_WITH_ERR, __x)
+#define DSI_VID_MAIN_CTL 0x00000090
+#define DSI_VID_MAIN_CTL_START_MODE_SHIFT 0
+#define DSI_VID_MAIN_CTL_START_MODE_MASK 0x00000003
+#define DSI_VID_MAIN_CTL_START_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, START_MODE, __x)
+#define DSI_VID_MAIN_CTL_STOP_MODE_SHIFT 2
+#define DSI_VID_MAIN_CTL_STOP_MODE_MASK 0x0000000C
+#define DSI_VID_MAIN_CTL_STOP_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, STOP_MODE, __x)
+#define DSI_VID_MAIN_CTL_VID_ID_SHIFT 4
+#define DSI_VID_MAIN_CTL_VID_ID_MASK 0x00000030
+#define DSI_VID_MAIN_CTL_VID_ID(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_ID, __x)
+#define DSI_VID_MAIN_CTL_HEADER_SHIFT 6
+#define DSI_VID_MAIN_CTL_HEADER_MASK 0x00000FC0
+#define DSI_VID_MAIN_CTL_HEADER(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, HEADER, __x)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_SHIFT 12
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_MASK 0x00003000
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS 0
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS 1
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE 2
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS 3
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, \
+ DSI_VID_MAIN_CTL_VID_PIXEL_MODE_##__x)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, __x)
+#define DSI_VID_MAIN_CTL_BURST_MODE_SHIFT 14
+#define DSI_VID_MAIN_CTL_BURST_MODE_MASK 0x00004000
+#define DSI_VID_MAIN_CTL_BURST_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, BURST_MODE, __x)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_SHIFT 15
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_MASK 0x00008000
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, __x)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_SHIFT 16
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_MASK 0x00010000
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, __x)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_SHIFT 17
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_MASK 0x00060000
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING 1
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0 2
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_1 3
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, \
+ DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_##__x)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, __x)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_SHIFT 19
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_MASK 0x00180000
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_BLANKING 1
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 2
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_1 3
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, \
+ DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_##__x)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, __x)
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT 21
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_MASK 0x00600000
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, RECOVERY_MODE, __x)
+#define DSI_VID_VSIZE 0x00000094
+#define DSI_VID_VSIZE_VSA_LENGTH_SHIFT 0
+#define DSI_VID_VSIZE_VSA_LENGTH_MASK 0x0000003F
+#define DSI_VID_VSIZE_VSA_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VSA_LENGTH, __x)
+#define DSI_VID_VSIZE_VBP_LENGTH_SHIFT 6
+#define DSI_VID_VSIZE_VBP_LENGTH_MASK 0x00000FC0
+#define DSI_VID_VSIZE_VBP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VBP_LENGTH, __x)
+#define DSI_VID_VSIZE_VFP_LENGTH_SHIFT 12
+#define DSI_VID_VSIZE_VFP_LENGTH_MASK 0x000FF000
+#define DSI_VID_VSIZE_VFP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VFP_LENGTH, __x)
+#define DSI_VID_VSIZE_VACT_LENGTH_SHIFT 20
+#define DSI_VID_VSIZE_VACT_LENGTH_MASK 0x7FF00000
+#define DSI_VID_VSIZE_VACT_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VACT_LENGTH, __x)
+#define DSI_VID_HSIZE1 0x00000098
+#define DSI_VID_HSIZE1_HSA_LENGTH_SHIFT 0
+#define DSI_VID_HSIZE1_HSA_LENGTH_MASK 0x000003FF
+#define DSI_VID_HSIZE1_HSA_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HSA_LENGTH, __x)
+#define DSI_VID_HSIZE1_HBP_LENGTH_SHIFT 10
+#define DSI_VID_HSIZE1_HBP_LENGTH_MASK 0x000FFC00
+#define DSI_VID_HSIZE1_HBP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HBP_LENGTH, __x)
+#define DSI_VID_HSIZE1_HFP_LENGTH_SHIFT 20
+#define DSI_VID_HSIZE1_HFP_LENGTH_MASK 0x7FF00000
+#define DSI_VID_HSIZE1_HFP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HFP_LENGTH, __x)
+#define DSI_VID_HSIZE2 0x0000009C
+#define DSI_VID_HSIZE2_RGB_SIZE_SHIFT 0
+#define DSI_VID_HSIZE2_RGB_SIZE_MASK 0x00001FFF
+#define DSI_VID_HSIZE2_RGB_SIZE(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE2, RGB_SIZE, __x)
+#define DSI_VID_BLKSIZE1 0x000000A0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK, __x)
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT 13
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK 0x03FFE000
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKEOL_PCK, __x)
+#define DSI_VID_BLKSIZE2 0x000000A4
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK, __x)
+#define DSI_VID_PCK_TIME 0x000000A8
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00001FFF
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION(__x) \
+ DSI_VAL2REG(DSI_VID_PCK_TIME, BLKEOL_DURATION, __x)
+#define DSI_VID_DPHY_TIME 0x000000AC
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_MASK 0x00001FFF
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION(__x) \
+ DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_LINE_DURATION, __x)
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT 13
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_MASK 0x00FFE000
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME(__x) \
+ DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_WAKEUP_TIME, __x)
+#define DSI_VID_ERR_COLOR 0x000000B0
+#define DSI_VID_ERR_COLOR_COL_RED_SHIFT 0
+#define DSI_VID_ERR_COLOR_COL_RED_MASK 0x000000FF
+#define DSI_VID_ERR_COLOR_COL_RED(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_RED, __x)
+#define DSI_VID_ERR_COLOR_COL_GREEN_SHIFT 8
+#define DSI_VID_ERR_COLOR_COL_GREEN_MASK 0x0000FF00
+#define DSI_VID_ERR_COLOR_COL_GREEN(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_GREEN, __x)
+#define DSI_VID_ERR_COLOR_COL_BLUE_SHIFT 16
+#define DSI_VID_ERR_COLOR_COL_BLUE_MASK 0x00FF0000
+#define DSI_VID_ERR_COLOR_COL_BLUE(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_BLUE, __x)
+#define DSI_VID_ERR_COLOR_PAD_VAL_SHIFT 24
+#define DSI_VID_ERR_COLOR_PAD_VAL_MASK 0xFF000000
+#define DSI_VID_ERR_COLOR_PAD_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, PAD_VAL, __x)
+#define DSI_VID_VPOS 0x000000B4
+#define DSI_VID_VPOS_LINE_POS_SHIFT 0
+#define DSI_VID_VPOS_LINE_POS_MASK 0x00000003
+#define DSI_VID_VPOS_LINE_POS(__x) \
+ DSI_VAL2REG(DSI_VID_VPOS, LINE_POS, __x)
+#define DSI_VID_VPOS_LINE_VAL_SHIFT 2
+#define DSI_VID_VPOS_LINE_VAL_MASK 0x00001FFC
+#define DSI_VID_VPOS_LINE_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_VPOS, LINE_VAL, __x)
+#define DSI_VID_HPOS 0x000000B8
+#define DSI_VID_HPOS_HORIZONTAL_POS_SHIFT 0
+#define DSI_VID_HPOS_HORIZONTAL_POS_MASK 0x00000007
+#define DSI_VID_HPOS_HORIZONTAL_POS(__x) \
+ DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_POS, __x)
+#define DSI_VID_HPOS_HORIZONTAL_VAL_SHIFT 3
+#define DSI_VID_HPOS_HORIZONTAL_VAL_MASK 0x0000FFF8
+#define DSI_VID_HPOS_HORIZONTAL_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_VAL, __x)
+#define DSI_VID_MODE_STS 0x000000BC
+#define DSI_VID_MODE_STS_VSG_RUNNING_SHIFT 0
+#define DSI_VID_MODE_STS_VSG_RUNNING_MASK 0x00000001
+#define DSI_VID_MODE_STS_VSG_RUNNING(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RUNNING, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA_SHIFT 1
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA_MASK 0x00000002
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_DATA, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_SHIFT 2
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_MASK 0x00000004
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_HSYNC, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_SHIFT 3
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_MASK 0x00000008
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_VSYNC, __x)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_SHIFT 4
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_MASK 0x00000010
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_LENGTH, __x)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_SHIFT 5
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_MASK 0x00000020
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_HEIGHT, __x)
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE_SHIFT 6
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE_MASK 0x00000040
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_BURSTWRITE, __x)
+#define DSI_VID_MODE_STS_ERR_LONGWRITE_SHIFT 7
+#define DSI_VID_MODE_STS_ERR_LONGWRITE_MASK 0x00000080
+#define DSI_VID_MODE_STS_ERR_LONGWRITE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGWRITE, __x)
+#define DSI_VID_MODE_STS_ERR_LONGREAD_SHIFT 8
+#define DSI_VID_MODE_STS_ERR_LONGREAD_MASK 0x00000100
+#define DSI_VID_MODE_STS_ERR_LONGREAD(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGREAD, __x)
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_SHIFT 9
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_MASK 0x00000200
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_VRS_WRONG_LENGTH, __x)
+#define DSI_VID_MODE_STS_VSG_RECOVERY_SHIFT 10
+#define DSI_VID_MODE_STS_VSG_RECOVERY_MASK 0x00000400
+#define DSI_VID_MODE_STS_VSG_RECOVERY(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RECOVERY, __x)
+#define DSI_VID_VCA_SETTING1 0x000000C0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT, __x)
+#define DSI_VID_VCA_SETTING1_BURST_LP_SHIFT 16
+#define DSI_VID_VCA_SETTING1_BURST_LP_MASK 0x00010000
+#define DSI_VID_VCA_SETTING1_BURST_LP(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING1, BURST_LP, __x)
+#define DSI_VID_VCA_SETTING2 0x000000C4
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT, __x)
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT 16
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK 0xFFFF0000
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT, __x)
+#define DSI_TVG_CTL 0x000000C8
+#define DSI_TVG_CTL_TVG_RUN_SHIFT 0
+#define DSI_TVG_CTL_TVG_RUN_MASK 0x00000001
+#define DSI_TVG_CTL_TVG_RUN(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_RUN, __x)
+#define DSI_TVG_CTL_TVG_STOPMODE_SHIFT 1
+#define DSI_TVG_CTL_TVG_STOPMODE_MASK 0x00000006
+#define DSI_TVG_CTL_TVG_STOPMODE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_STOPMODE, __x)
+#define DSI_TVG_CTL_TVG_MODE_SHIFT 3
+#define DSI_TVG_CTL_TVG_MODE_MASK 0x00000018
+#define DSI_TVG_CTL_TVG_MODE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_MODE, __x)
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE_SHIFT 5
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE_MASK 0x000000E0
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_STRIPE_SIZE, __x)
+#define DSI_TVG_IMG_SIZE 0x000000CC
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_SHIFT 0
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_MASK 0x00001FFF
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE(__x) \
+ DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_LINE_SIZE, __x)
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE_SHIFT 16
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE_MASK 0x07FF0000
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE(__x) \
+ DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_NBLINE, __x)
+#define DSI_TVG_COLOR1 0x000000D0
+#define DSI_TVG_COLOR1_COL1_RED_SHIFT 0
+#define DSI_TVG_COLOR1_COL1_RED_MASK 0x000000FF
+#define DSI_TVG_COLOR1_COL1_RED(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_RED, __x)
+#define DSI_TVG_COLOR1_COL1_GREEN_SHIFT 8
+#define DSI_TVG_COLOR1_COL1_GREEN_MASK 0x0000FF00
+#define DSI_TVG_COLOR1_COL1_GREEN(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_GREEN, __x)
+#define DSI_TVG_COLOR1_COL1_BLUE_SHIFT 16
+#define DSI_TVG_COLOR1_COL1_BLUE_MASK 0x00FF0000
+#define DSI_TVG_COLOR1_COL1_BLUE(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_BLUE, __x)
+#define DSI_TVG_COLOR2 0x000000D4
+#define DSI_TVG_COLOR2_COL2_RED_SHIFT 0
+#define DSI_TVG_COLOR2_COL2_RED_MASK 0x000000FF
+#define DSI_TVG_COLOR2_COL2_RED(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_RED, __x)
+#define DSI_TVG_COLOR2_COL2_GREEN_SHIFT 8
+#define DSI_TVG_COLOR2_COL2_GREEN_MASK 0x0000FF00
+#define DSI_TVG_COLOR2_COL2_GREEN(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_GREEN, __x)
+#define DSI_TVG_COLOR2_COL2_BLUE_SHIFT 16
+#define DSI_TVG_COLOR2_COL2_BLUE_MASK 0x00FF0000
+#define DSI_TVG_COLOR2_COL2_BLUE(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_BLUE, __x)
+#define DSI_TVG_STS 0x000000D8
+#define DSI_TVG_STS_TVG_RUNNING_SHIFT 0
+#define DSI_TVG_STS_TVG_RUNNING_MASK 0x00000001
+#define DSI_TVG_STS_TVG_RUNNING(__x) \
+ DSI_VAL2REG(DSI_TVG_STS, TVG_RUNNING, __x)
+#define DSI_TBG_CTL 0x000000E0
+#define DSI_TBG_CTL_TBG_START_SHIFT 0
+#define DSI_TBG_CTL_TBG_START_MASK 0x00000001
+#define DSI_TBG_CTL_TBG_START(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_START, __x)
+#define DSI_TBG_CTL_TBG_HS_REQ_SHIFT 1
+#define DSI_TBG_CTL_TBG_HS_REQ_MASK 0x00000002
+#define DSI_TBG_CTL_TBG_HS_REQ(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_HS_REQ, __x)
+#define DSI_TBG_CTL_TBG_DATA_SEL_SHIFT 2
+#define DSI_TBG_CTL_TBG_DATA_SEL_MASK 0x00000004
+#define DSI_TBG_CTL_TBG_DATA_SEL(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_DATA_SEL, __x)
+#define DSI_TBG_CTL_TBG_MODE_SHIFT 3
+#define DSI_TBG_CTL_TBG_MODE_MASK 0x00000018
+#define DSI_TBG_CTL_TBG_MODE_1BYTE 0
+#define DSI_TBG_CTL_TBG_MODE_2BYTE 1
+#define DSI_TBG_CTL_TBG_MODE_BURST_COUNTER 2
+#define DSI_TBG_CTL_TBG_MODE_BURST 3
+#define DSI_TBG_CTL_TBG_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, DSI_TBG_CTL_TBG_MODE_##__x)
+#define DSI_TBG_CTL_TBG_MODE(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, __x)
+#define DSI_TBG_SETTING 0x000000E4
+#define DSI_TBG_SETTING_TBG_DATA_SHIFT 0
+#define DSI_TBG_SETTING_TBG_DATA_MASK 0x0000FFFF
+#define DSI_TBG_SETTING_TBG_DATA(__x) \
+ DSI_VAL2REG(DSI_TBG_SETTING, TBG_DATA, __x)
+#define DSI_TBG_SETTING_TBG_CPT_SHIFT 16
+#define DSI_TBG_SETTING_TBG_CPT_MASK 0x0FFF0000
+#define DSI_TBG_SETTING_TBG_CPT(__x) \
+ DSI_VAL2REG(DSI_TBG_SETTING, TBG_CPT, __x)
+#define DSI_TBG_STS 0x000000E8
+#define DSI_TBG_STS_TBG_STATUS_SHIFT 0
+#define DSI_TBG_STS_TBG_STATUS_MASK 0x00000001
+#define DSI_TBG_STS_TBG_STATUS(__x) \
+ DSI_VAL2REG(DSI_TBG_STS, TBG_STATUS, __x)
+#define DSI_MCTL_MAIN_STS_CTL 0x000000F0
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_SHIFT 0
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_SHIFT 2
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_SHIFT 3
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_SHIFT 4
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_SHIFT 5
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_SHIFT 7
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_SHIFT 16
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_MASK 0x00010000
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_SHIFT 17
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_MASK 0x00020000
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_SHIFT 18
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_MASK 0x00040000
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_SHIFT 19
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_MASK 0x00080000
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_SHIFT 20
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_MASK 0x00100000
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_SHIFT 21
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_MASK 0x00200000
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_SHIFT 22
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_MASK 0x00400000
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_SHIFT 23
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_MASK 0x00800000
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL 0x000000F4
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_SHIFT 0
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_MASK 0x00000001
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_SHIFT 1
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_MASK 0x00000002
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_SHIFT 2
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_MASK 0x00000004
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_SHIFT 3
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_MASK 0x00000008
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_SHIFT 4
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_MASK 0x00000010
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_SHIFT 5
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_SHIFT 16
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_MASK 0x00010000
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_SHIFT 17
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_MASK 0x00020000
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_SHIFT 18
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_MASK 0x00040000
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_SHIFT 19
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_MASK 0x00080000
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_SHIFT 20
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_MASK 0x00100000
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_SHIFT 21
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_MASK 0x00200000
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL 0x000000F8
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_SHIFT 1
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_SHIFT 2
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_SHIFT 3
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_SHIFT 4
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_SHIFT 5
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_SHIFT 6
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_SHIFT 7
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_SHIFT 8
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_SHIFT 9
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_SHIFT 10
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_SHIFT 16
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_MASK 0x00010000
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_SHIFT 17
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_MASK 0x00020000
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_SHIFT 18
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_MASK 0x00040000
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_SHIFT 19
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_MASK 0x00080000
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_SHIFT 20
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_MASK 0x00100000
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_SHIFT 21
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_MASK 0x00200000
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_SHIFT 22
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_MASK 0x00400000
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_SHIFT 23
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_MASK 0x00800000
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_SHIFT 24
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_MASK 0x01000000
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_SHIFT 25
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_MASK 0x02000000
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_SHIFT 26
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_MASK 0x04000000
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL 0x000000FC
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_SHIFT 16
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_MASK 0x00010000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_SHIFT 17
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_MASK 0x00020000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_SHIFT 18
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_MASK 0x00040000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_SHIFT 19
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_MASK 0x00080000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_SHIFT 20
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_MASK 0x00100000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_SHIFT 21
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_MASK 0x00200000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_SHIFT 22
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_MASK 0x00400000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_SHIFT 23
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_MASK 0x00800000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_SHIFT 24
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_MASK 0x01000000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL 0x00000100
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_SHIFT 0
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_MASK 0x00000001
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_SHIFT 1
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_MASK 0x00000002
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_SHIFT 2
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_MASK 0x00000004
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_SHIFT 3
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_MASK 0x00000008
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EN, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_SHIFT 4
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_MASK 0x00000010
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EN, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_SHIFT 5
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_MASK 0x00000020
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_SHIFT 6
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_MASK 0x00000040
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_SHIFT 7
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_MASK 0x00000080
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_SHIFT 8
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_MASK 0x00000100
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_SHIFT 9
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_MASK 0x00000200
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EN, __x)
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_SHIFT 16
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_MASK 0x00010000
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_SHIFT 17
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_MASK 0x00020000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_SHIFT 18
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_MASK 0x00040000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_SHIFT 19
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_MASK 0x00080000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_SHIFT 20
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_MASK 0x00100000
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_SHIFT 21
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_MASK 0x00200000
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_SHIFT 22
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_MASK 0x00400000
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_SHIFT 23
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_MASK 0x00800000
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_SHIFT 24
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_MASK 0x01000000
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_SHIFT 25
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_MASK 0x02000000
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_SHIFT 26
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_MASK 0x04000000
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RECOVERY_EDGE, __x)
+#define DSI_TG_STS_CTL 0x00000104
+#define DSI_TG_STS_CTL_TVG_STS_EN_SHIFT 0
+#define DSI_TG_STS_CTL_TVG_STS_EN_MASK 0x00000001
+#define DSI_TG_STS_CTL_TVG_STS_EN(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EN, __x)
+#define DSI_TG_STS_CTL_TBG_STS_EN_SHIFT 1
+#define DSI_TG_STS_CTL_TBG_STS_EN_MASK 0x00000002
+#define DSI_TG_STS_CTL_TBG_STS_EN(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EN, __x)
+#define DSI_TG_STS_CTL_TVG_STS_EDGE_SHIFT 16
+#define DSI_TG_STS_CTL_TVG_STS_EDGE_MASK 0x00010000
+#define DSI_TG_STS_CTL_TVG_STS_EDGE(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EDGE, __x)
+#define DSI_TG_STS_CTL_TBG_STS_EDGE_SHIFT 17
+#define DSI_TG_STS_CTL_TBG_STS_EDGE_MASK 0x00020000
+#define DSI_TG_STS_CTL_TBG_STS_EDGE(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL 0x00000108
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_SHIFT 6
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_MASK 0x00000040
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_SHIFT 7
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_MASK 0x00000080
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_SHIFT 8
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_MASK 0x00000100
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_SHIFT 9
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_MASK 0x00000200
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_SHIFT 10
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_MASK 0x00000400
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_SHIFT 11
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_MASK 0x00000800
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_SHIFT 12
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_MASK 0x00001000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_SHIFT 13
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_MASK 0x00002000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_SHIFT 14
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_MASK 0x00004000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_SHIFT 15
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_MASK 0x00008000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_SHIFT 22
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_MASK 0x00400000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_SHIFT 23
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_MASK 0x00800000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_SHIFT 24
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_MASK 0x01000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_SHIFT 25
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_MASK 0x02000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_SHIFT 26
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_MASK 0x04000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_SHIFT 27
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_MASK 0x08000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_SHIFT 28
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_MASK 0x10000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_SHIFT 29
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_MASK 0x20000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_SHIFT 30
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_MASK 0x40000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_SHIFT 31
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_MASK 0x80000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CLR 0x00000110
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_SHIFT 0
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, PLL_LOCK_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CLKLANE_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_SHIFT 2
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT1_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_SHIFT 3
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT2_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_SHIFT 4
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, HSTX_TO_ERR_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_SHIFT 5
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, LPRX_TO_ERR_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CRS_UNTERM_PCK_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_SHIFT 7
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, VRS_UNTERM_PCK_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR 0x00000114
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_SHIFT 0
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_MASK 0x00000001
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_NO_TE_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_SHIFT 1
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_MASK 0x00000002
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_TE_MISS_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_SHIFT 2
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_MASK 0x00000004
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI1_UNDERRUN_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_SHIFT 3
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_MASK 0x00000008
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI2_UNDERRUN_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_SHIFT 4
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_MASK 0x00000010
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_UNWANTED_RD_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_SHIFT 5
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, CSM_RUNNING_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR 0x00000118
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, CMD_TRANSMISSION_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_SHIFT 1
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, WRITE_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_SHIFT 2
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_SHIFT 3
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_SHIFT 4
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_SHIFT 5
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_SHIFT 6
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_SHIFT 7
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TE_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_SHIFT 8
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_SHIFT 9
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_FINISHED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_SHIFT 10
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_WITH_ERR_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR 0x0000011C
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_FIXED_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNCORRECTABLE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_CHECKSUM_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNDECODABLE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_RECEIVE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_OVERSIZE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_WRONG_LENGTH_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_MISSING_EOT_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_EOT_WITH_ERR_CLR, __x)
+#define DSI_VID_MODE_STS_CLR 0x00000120
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_SHIFT 0
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_MASK 0x00000001
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_STS_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_SHIFT 1
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_MASK 0x00000002
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_DATA_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_SHIFT 2
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_MASK 0x00000004
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_HSYNC_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_SHIFT 3
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_MASK 0x00000008
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_VSYNC_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_SHIFT 4
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_MASK 0x00000010
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_LENGTH_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_SHIFT 5
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_MASK 0x00000020
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_HEIGHT_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_SHIFT 6
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_MASK 0x00000040
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_BURSTWRITE_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_SHIFT 7
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_MASK 0x00000080
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGWRITE_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_SHIFT 8
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_MASK 0x00000100
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGREAD_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_SHIFT 9
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_MASK 0x00000200
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_VRS_WRONG_LENGTH_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_SHIFT 10
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_MASK 0x00000400
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_RECOVERY_CLR, __x)
+#define DSI_TG_STS_CLR 0x00000124
+#define DSI_TG_STS_CLR_TVG_STS_CLR_SHIFT 0
+#define DSI_TG_STS_CLR_TVG_STS_CLR_MASK 0x00000001
+#define DSI_TG_STS_CLR_TVG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CLR, TVG_STS_CLR, __x)
+#define DSI_TG_STS_CLR_TBG_STS_CLR_SHIFT 1
+#define DSI_TG_STS_CLR_TBG_STS_CLR_MASK 0x00000002
+#define DSI_TG_STS_CLR_TBG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CLR, TBG_STS_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR 0x00000128
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_2_CLR, __x)
+#define DSI_MCTL_MAIN_STS_FLAG 0x00000130
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_SHIFT 0
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, PLL_LOCK_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_SHIFT 1
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CLKLANE_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_SHIFT 2
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT1_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_SHIFT 3
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT2_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_SHIFT 4
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, HSTX_TO_ERR_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_SHIFT 5
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, LPRX_TO_ERR_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_SHIFT 6
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CRS_UNTERM_PCK_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_SHIFT 7
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, VRS_UNTERM_PCK_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG 0x00000134
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_SHIFT 0
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_MASK 0x00000001
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_SHIFT 1
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_MASK 0x00000002
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_TE_MISS_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_SHIFT 2
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_MASK 0x00000004
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI1_UNDERRUN_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_SHIFT 3
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_MASK 0x00000008
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI2_UNDERRUN_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_SHIFT 4
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_MASK 0x00000010
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_UNWANTED_RD_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_SHIFT 5
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_MASK 0x00000020
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, CSM_RUNNING_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG 0x00000138
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_SHIFT 0
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, CMD_TRANSMISSION_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_SHIFT 1
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, WRITE_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_SHIFT 2
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_SHIFT 3
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_SHIFT 4
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_SHIFT 5
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_SHIFT 6
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_SHIFT 7
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_SHIFT 8
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_SHIFT 9
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_FINISHED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_SHIFT 10
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_WITH_ERR_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG 0x0000013C
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_FIXED_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNCORRECTABLE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_CHECKSUM_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNDECODABLE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_RECEIVE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_OVERSIZE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_WRONG_LENGTH_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_MISSING_EOT_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_EOT_WITH_ERR_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG 0x00000140
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_SHIFT 0
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_MASK 0x00000001
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_STS_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_SHIFT 1
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_MASK 0x00000002
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_DATA_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_SHIFT 2
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_MASK 0x00000004
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_HSYNC_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_SHIFT 3
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_MASK 0x00000008
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_VSYNC_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_SHIFT 4
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_MASK 0x00000010
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_LENGTH_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_SHIFT 5
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_MASK 0x00000020
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_HEIGHT_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_SHIFT 6
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_MASK 0x00000040
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_BURSTWRITE_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_SHIFT 7
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_MASK 0x00000080
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGWRITE_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_SHIFT 8
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_MASK 0x00000100
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGREAD_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_SHIFT 9
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_MASK 0x00000200
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_VRS_WRONG_LENGTH_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_SHIFT 10
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_MASK 0x00000400
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_RECOVERY_FLAG, __x)
+#define DSI_TG_STS_FLAG 0x00000144
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG_SHIFT 0
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG_MASK 0x00000001
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_TG_STS_FLAG, TVG_STS_FLAG, __x)
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG_SHIFT 1
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG_MASK 0x00000002
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_TG_STS_FLAG, TBG_STS_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG 0x00000148
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_2_FLAG, __x)
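+/*
+ * The register families above appear to form the usual per-event triplet:
+ * a *_STS_CTL bit arms the event (_EN/_EDGE fields), *_STS_FLAG holds the
+ * latched status, and the matching *_STS_CLR bit clears it. A minimal
+ * handling sketch, assuming dsi_rreg()/dsi_wreg() register accessors
+ * (assumed helpers, not defined in this header):
+ *
+ *	u32 sts = dsi_rreg(io, DSI_MCTL_MAIN_STS_FLAG);
+ *	if (sts & DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_MASK)
+ *		dsi_wreg(io, DSI_MCTL_MAIN_STS_CLR,
+ *			DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR(true));
+ */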
+#define DSI_DPHY_LANES_TRIM 0x00000150
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_SHIFT 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_MASK 0x00000003
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_SHIFT 2
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_MASK 0x00000004
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_CD_OFF_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_SHIFT 3
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_MASK 0x00000008
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_SHIFT 4
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_MASK 0x00000010
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_SHIFT 5
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_MASK 0x00000020
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_SHIFT 6
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_MASK 0x000000C0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_SHIFT 8
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_MASK 0x00000300
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_RX_VIL_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_SHIFT 10
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_MASK 0x00000C00
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_TX_SLEWRATE_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_SHIFT 12
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_MASK 0x00001000
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_81 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90 1
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, \
+ DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_##__x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_SHIFT 13
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_MASK 0x00002000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_SHIFT 14
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_MASK 0x00004000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_SHIFT 15
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_MASK 0x00008000
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_SHIFT 16
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_MASK 0x00030000
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_SHIFT 18
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_MASK 0x00040000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_SHIFT 19
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_MASK 0x00080000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_SHIFT 20
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_MASK 0x00100000
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT2, __x)
+#define DSI_ID_REG 0x00000FF0
+#define DSI_ID_REG_Y_SHIFT 0
+#define DSI_ID_REG_Y_MASK 0x0000000F
+#define DSI_ID_REG_Y(__x) \
+ DSI_VAL2REG(DSI_ID_REG, Y, __x)
+#define DSI_ID_REG_X_SHIFT 4
+#define DSI_ID_REG_X_MASK 0x000000F0
+#define DSI_ID_REG_X(__x) \
+ DSI_VAL2REG(DSI_ID_REG, X, __x)
+#define DSI_ID_REG_H_SHIFT 8
+#define DSI_ID_REG_H_MASK 0x00000300
+#define DSI_ID_REG_H(__x) \
+ DSI_VAL2REG(DSI_ID_REG, H, __x)
+#define DSI_ID_REG_PRODUCT_ID_SHIFT 10
+#define DSI_ID_REG_PRODUCT_ID_MASK 0x0003FC00
+#define DSI_ID_REG_PRODUCT_ID(__x) \
+ DSI_VAL2REG(DSI_ID_REG, PRODUCT_ID, __x)
+#define DSI_ID_REG_VENDOR_ID_SHIFT 18
+#define DSI_ID_REG_VENDOR_ID_MASK 0xFFFC0000
+#define DSI_ID_REG_VENDOR_ID(__x) \
+ DSI_VAL2REG(DSI_ID_REG, VENDOR_ID, __x)
+#define DSI_IP_CONF 0x00000FF4
+#define DSI_IP_CONF_FIFO_SIZE_SHIFT 0
+#define DSI_IP_CONF_FIFO_SIZE_MASK 0x0000003F
+#define DSI_IP_CONF_FIFO_SIZE(__x) \
+ DSI_VAL2REG(DSI_IP_CONF, FIFO_SIZE, __x)
diff --git a/drivers/video/mcde/mcde_bus.c b/drivers/video/mcde/mcde_bus.c
new file mode 100644
index 00000000000..bdcf65b0fb9
--- /dev/null
+++ b/drivers/video/mcde/mcde_bus.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display bus driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/notifier.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_dss.h>
+
+#define to_mcde_display_driver(__drv) \
+ container_of((__drv), struct mcde_display_driver, driver)
+
+static BLOCKING_NOTIFIER_HEAD(bus_notifier_list);
+
+static int mcde_drv_suspend(struct device *_dev, pm_message_t state);
+static int mcde_drv_resume(struct device *_dev);
+struct bus_type mcde_bus_type;
+
+static int mcde_suspend_device(struct device *dev, void *data)
+{
+ pm_message_t *state = data;
+ if (dev->driver && dev->driver->suspend)
+ return dev->driver->suspend(dev, *state);
+ return 0;
+}
+
+static int mcde_resume_device(struct device *dev, void *data)
+{
+ if (dev->driver && dev->driver->resume)
+ return dev->driver->resume(dev);
+ return 0;
+}
+
+/* Bus driver */
+
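+/*
+ * Display devices are named "<name>.<id>" (or just "<name>") by
+ * mcde_display_device_register(), so matching on the driver name as a
+ * prefix binds a display driver to every instance of that display.
+ */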
+static int mcde_bus_match(struct device *_dev, struct device_driver *driver)
+{
+ pr_debug("Matching device %s with driver %s\n",
+ dev_name(_dev), driver->name);
+
+ return strncmp(dev_name(_dev), driver->name, strlen(driver->name)) == 0;
+}
+
+static int mcde_bus_suspend(struct device *_dev, pm_message_t state)
+{
+ int ret;
+ ret = bus_for_each_dev(&mcde_bus_type, NULL, &state,
+ mcde_suspend_device);
+ if (ret) {
+ /* TODO Resume all suspended devices */
+ /* mcde_bus_resume(dev); */
+ return ret;
+ }
+ return 0;
+}
+
+static int mcde_bus_resume(struct device *_dev)
+{
+ return bus_for_each_dev(&mcde_bus_type, NULL, NULL, mcde_resume_device);
+}
+
+struct bus_type mcde_bus_type = {
+ .name = "mcde_bus",
+ .match = mcde_bus_match,
+ .suspend = mcde_bus_suspend,
+ .resume = mcde_bus_resume,
+};
+
+static int mcde_drv_probe(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ return drv->probe(dev);
+}
+
+static int mcde_drv_remove(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ return drv->remove(dev);
+}
+
+static void mcde_drv_shutdown(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ drv->shutdown(dev);
+}
+
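+/*
+ * Default suspend/resume used when a display driver does not provide its
+ * own hooks: without Android earlysuspend (and with CONFIG_PM) the display
+ * is taken to PM_OFF on suspend and back to PM_STANDBY on resume.
+ */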
+static int mcde_drv_suspend(struct device *_dev, pm_message_t state)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ if (drv->suspend)
+ return drv->suspend(dev, state);
+ else
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ return dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+#else
+ return 0;
+#endif
+}
+
+static int mcde_drv_resume(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ if (drv->resume)
+ return drv->resume(dev);
+ else
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ return dev->set_power_mode(dev, MCDE_DISPLAY_PM_STANDBY);
+#else
+ return 0;
+#endif
+}
+
+/* Bus device */
+
+static void mcde_bus_release(struct device *dev)
+{
+}
+
+struct device mcde_bus = {
+ .init_name = "mcde_bus",
+ .release = mcde_bus_release
+};
+
+/* Public bus API */
+
+int mcde_display_driver_register(struct mcde_display_driver *drv)
+{
+ drv->driver.bus = &mcde_bus_type;
+ if (drv->probe)
+ drv->driver.probe = mcde_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mcde_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = mcde_drv_shutdown;
+ drv->driver.suspend = mcde_drv_suspend;
+ drv->driver.resume = mcde_drv_resume;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mcde_display_driver_register);
+
+void mcde_display_driver_unregister(struct mcde_display_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mcde_display_driver_unregister);
+
+static void mcde_display_dev_release(struct device *dev)
+{
+ /* Do nothing */
+}
+
+int mcde_display_device_register(struct mcde_display_device *dev)
+{
+ /* Setup device */
+ if (!dev)
+ return -EINVAL;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.bus = &mcde_bus_type;
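+ /* Default to the mcde_bus device as parent unless one was supplied */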
+ if (dev->dev.parent == NULL)
+ dev->dev.parent = &mcde_bus;
+ dev->dev.release = mcde_display_dev_release;
+ if (dev->id != -1)
+ dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+ else
+ dev_set_name(&dev->dev, "%s", dev->name);
+
+ mcde_display_init_device(dev);
+
+ return device_register(&dev->dev);
+}
+EXPORT_SYMBOL(mcde_display_device_register);
+
+void mcde_display_device_unregister(struct mcde_display_device *dev)
+{
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL(mcde_display_device_unregister);
+
+/* Notifications */
+int mcde_dss_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bus_notifier_list, nb);
+}
+EXPORT_SYMBOL(mcde_dss_register_notifier);
+
+int mcde_dss_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bus_notifier_list, nb);
+}
+EXPORT_SYMBOL(mcde_dss_unregister_notifier);
+
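+/*
+ * Translate driver bind/unbind events on the MCDE bus into DSS
+ * display registered/unregistered notifications for bus clients.
+ */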
+static int bus_notify_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ if (event == BUS_NOTIFY_BOUND_DRIVER) {
+ ddev->initialized = true;
+ blocking_notifier_call_chain(&bus_notifier_list,
+ MCDE_DSS_EVENT_DISPLAY_REGISTERED, ddev);
+ } else if (event == BUS_NOTIFY_UNBIND_DRIVER) {
+ ddev->initialized = false;
+ blocking_notifier_call_chain(&bus_notifier_list,
+ MCDE_DSS_EVENT_DISPLAY_UNREGISTERED, ddev);
+ }
+ return 0;
+}
+
+struct notifier_block bus_nb = {
+ .notifier_call = bus_notify_callback,
+};
+
+/* Driver init/exit */
+
+int __init mcde_display_init(void)
+{
+ int ret;
+
+ ret = bus_register(&mcde_bus_type);
+ if (ret) {
+ pr_warning("Unable to register bus type\n");
+ goto no_bus_registration;
+ }
+ ret = device_register(&mcde_bus);
+ if (ret) {
+ pr_warning("Unable to register bus device\n");
+ goto no_device_registration;
+ }
+ ret = bus_register_notifier(&mcde_bus_type, &bus_nb);
+ if (ret) {
+ pr_warning("Unable to register bus notifier\n");
+ goto no_bus_notifier;
+ }
+
+ goto out;
+
+no_bus_notifier:
+ device_unregister(&mcde_bus);
+no_device_registration:
+ bus_unregister(&mcde_bus_type);
+no_bus_registration:
+out:
+ return ret;
+}
+
+void mcde_display_exit(void)
+{
+ bus_unregister_notifier(&mcde_bus_type, &bus_nb);
+ device_unregister(&mcde_bus);
+ bus_unregister(&mcde_bus_type);
+}
diff --git a/drivers/video/mcde/mcde_debugfs.c b/drivers/video/mcde/mcde_debugfs.c
new file mode 100644
index 00000000000..586b1787d00
--- /dev/null
+++ b/drivers/video/mcde/mcde_debugfs.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "mcde_debugfs.h"
+
+#define MAX_NUM_OVERLAYS 2
+#define MAX_NUM_CHANNELS 4
+#define DEFAULT_DMESG_FPS_LOG_INTERVAL 100
+
+struct fps_info {
+ u32 enable_dmesg;
+ u32 interval_ms;
+ struct timespec timestamp_last;
+ u32 frame_counter_last;
+ u32 frame_counter;
+ u32 fpks;
+};
+
+struct overlay_info {
+ u8 id;
+ struct dentry *dentry;
+ struct fps_info fps;
+};
+
+struct channel_info {
+ u8 id;
+ struct dentry *dentry;
+ struct mcde_chnl_state *chnl;
+ struct fps_info fps;
+ struct overlay_info overlays[MAX_NUM_OVERLAYS];
+};
+
+static struct mcde_info {
+ struct device *dev;
+ struct dentry *dentry;
+ struct channel_info channels[MAX_NUM_CHANNELS];
+} mcde;
+
+/* Requires: lhs > rhs */
+static inline u32 timespec_ms_diff(struct timespec lhs, struct timespec rhs)
+{
+ struct timespec tmp_ts = timespec_sub(lhs, rhs);
+ u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
+ do_div(tmp_ns, NSEC_PER_MSEC);
+ return (u32)tmp_ns;
+}
+
+/* Returns "frames per 1000 secs", divide by 1000 to get fps with 3 decimals */
+static u32 update_fps(struct fps_info *fps)
+{
+ struct timespec now;
+ u32 fpks = 0, ms_since_last, num_frames;
+
+ getrawmonotonic(&now);
+ fps->frame_counter++;
+
+ ms_since_last = timespec_ms_diff(now, fps->timestamp_last);
+ num_frames = fps->frame_counter - fps->frame_counter_last;
+ if (num_frames > 1 && ms_since_last >= fps->interval_ms) {
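+ /* fpks = frames / (ms / 1000) * 1000 = frames * 1000000 / ms */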
+ fpks = (num_frames * 1000000) / ms_since_last;
+ fps->timestamp_last = now;
+ fps->frame_counter_last = fps->frame_counter;
+ fps->fpks = fpks;
+ }
+
+ return fpks;
+}
+
+static void update_chnl_fps(struct channel_info *ci)
+{
+ u32 fpks = update_fps(&ci->fps);
+ if (fpks && ci->fps.enable_dmesg)
+ dev_info(mcde.dev, "FPS: chnl=%d fps=%d.%.3d\n", ci->id,
+ fpks / 1000, fpks % 1000);
+}
+
+static void update_ovly_fps(struct channel_info *ci, struct overlay_info *oi)
+{
+ u32 fpks = update_fps(&oi->fps);
+ if (fpks && oi->fps.enable_dmesg)
+ dev_info(mcde.dev, "FPS: ovly=%d.%d fps=%d.%.3d\n", ci->id,
+ oi->id, fpks / 1000, fpks % 1000);
+}
+
+int mcde_debugfs_create(struct device *dev)
+{
+ if (mcde.dev)
+ return -EBUSY;
+
+ mcde.dentry = debugfs_create_dir("mcde", NULL);
+ if (!mcde.dentry)
+ return -ENOMEM;
+ mcde.dev = dev;
+
+ return 0;
+}
+
+static struct channel_info *find_chnl(u8 chnl_id)
+{
+ if (chnl_id >= MAX_NUM_CHANNELS)
+ return NULL;
+ return &mcde.channels[chnl_id];
+}
+
+static struct overlay_info *find_ovly(struct channel_info *ci, u8 ovly_id)
+{
+ if (!ci || ovly_id >= MAX_NUM_OVERLAYS)
+ return NULL;
+ return &ci->overlays[ovly_id];
+}
+
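+/*
+ * Per channel/overlay debugfs files: frame_counter and frames_per_ksecs
+ * are read only, while interval_ms and dmesg control the optional FPS
+ * logging to the kernel log.
+ */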
+static void create_fps_files(struct dentry *dentry, struct fps_info *fps)
+{
+ debugfs_create_u32("frame_counter", S_IRUGO, dentry,
+ &fps->frame_counter);
+ debugfs_create_u32("frames_per_ksecs", S_IRUGO, dentry, &fps->fpks);
+ debugfs_create_u32("interval_ms", S_IRUGO|S_IWUGO, dentry,
+ &fps->interval_ms);
+ debugfs_create_u32("dmesg", S_IRUGO|S_IWUGO, dentry,
+ &fps->enable_dmesg);
+}
+
+int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ char name[10];
+
+ if (!chnl || !ci)
+ return -EINVAL;
+ if (ci->chnl)
+ return -EBUSY;
+
+ snprintf(name, sizeof(name), "chnl%d", chnl_id);
+ ci->dentry = debugfs_create_dir(name, mcde.dentry);
+ if (!ci->dentry)
+ return -ENOMEM;
+
+ create_fps_files(ci->dentry, &ci->fps);
+
+ ci->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL;
+ ci->id = chnl_id;
+ ci->chnl = chnl;
+
+ return 0;
+}
+
+int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ struct overlay_info *oi = find_ovly(ci, ovly_id);
+ char name[10];
+
+ if (!oi || !ci || ovly_id >= MAX_NUM_OVERLAYS)
+ return -EINVAL;
+ if (oi->dentry)
+ return -EBUSY;
+
+ snprintf(name, sizeof(name), "ovly%d", ovly_id);
+ oi->dentry = debugfs_create_dir(name, ci->dentry);
+ if (!oi->dentry)
+ return -ENOMEM;
+
+ create_fps_files(oi->dentry, &oi->fps);
+
+ oi->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL;
+ oi->id = ovly_id;
+
+ return 0;
+}
+
+void mcde_debugfs_channel_update(u8 chnl_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+
+ if (!ci || !ci->chnl)
+ return;
+
+ update_chnl_fps(ci);
+}
+
+void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ struct overlay_info *oi = find_ovly(ci, ovly_id);
+
+ if (!oi || !oi->dentry)
+ return;
+
+ update_ovly_fps(ci, oi);
+}
+
diff --git a/drivers/video/mcde/mcde_debugfs.h b/drivers/video/mcde/mcde_debugfs.h
new file mode 100644
index 00000000000..9f1e7f18ea5
--- /dev/null
+++ b/drivers/video/mcde/mcde_debugfs.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __MCDE_DEBUGFS__H__
+#define __MCDE_DEBUGFS__H__
+
+#include <video/mcde.h>
+
+int mcde_debugfs_create(struct device *dev);
+int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl);
+int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id);
+
+void mcde_debugfs_channel_update(u8 chnl_id);
+void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id);
+
+#endif /* __MCDE_DEBUGFS__H__ */
+
diff --git a/drivers/video/mcde/mcde_display.c b/drivers/video/mcde/mcde_display.c
new file mode 100644
index 00000000000..28409166c7d
--- /dev/null
+++ b/drivers/video/mcde/mcde_display.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <video/mcde_display.h>
+
+/*temp*/
+#include <linux/delay.h>
+
+static void mcde_display_get_native_resolution_default(
+ struct mcde_display_device *ddev, u16 *x_res, u16 *y_res)
+{
+ if (x_res)
+ *x_res = ddev->native_x_res;
+ if (y_res)
+ *y_res = ddev->native_y_res;
+}
+
+static enum mcde_ovly_pix_fmt mcde_display_get_default_pixel_format_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->default_pixel_format;
+}
+
+static void mcde_display_get_physical_size_default(
+ struct mcde_display_device *ddev, u16 *width, u16 *height)
+{
+ if (width)
+ *width = ddev->physical_width;
+ if (height)
+ *height = ddev->physical_height;
+}
+
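+/*
+ * Default power mode handling. Transitions pass through STANDBY:
+ * OFF -> STANDBY uses platform_enable(), STANDBY <-> ON is done with DCS
+ * sleep/display commands on DSI ports (for DPI the mode is simply
+ * recorded), and STANDBY -> OFF uses platform_disable(). The resulting
+ * mode is propagated to the channel.
+ */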
+static int mcde_display_set_power_mode_default(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ /* force register settings */
+ if (ddev->port->type == MCDE_PORTTYPE_DPI)
+ ddev->update_flags = UPDATE_FLAG_VIDEO_MODE |
+ UPDATE_FLAG_PIXEL_FORMAT;
+ }
+
+ if (ddev->port->type == MCDE_PORTTYPE_DSI) {
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_ON, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ } else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ /* ON -> STANDBY */
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_OFF, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ } else if (ddev->port->type == MCDE_PORTTYPE_DPI) {
+ ddev->power_mode = power_mode;
+ } else if (ddev->power_mode != power_mode) {
+ return -EINVAL;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static inline enum mcde_display_power_mode mcde_display_get_power_mode_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->power_mode;
+}
+
+static inline int mcde_display_try_video_mode_default(
+ struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ /* TODO Check if inside native_xres and native_yres */
+ return 0;
+}
+
+static int mcde_display_set_video_mode_default(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ int ret;
+ struct mcde_video_mode channel_video_mode;
+
+ if (!video_mode)
+ return -EINVAL;
+
+ ddev->video_mode = *video_mode;
+ channel_video_mode = ddev->video_mode;
+ /* Depending on whether the display or the MCDE should rotate */
+ if (ddev->rotation == MCDE_DISPLAY_ROT_90_CCW ||
+ ddev->rotation == MCDE_DISPLAY_ROT_90_CW) {
+ channel_video_mode.xres = ddev->native_x_res;
+ channel_video_mode.yres = ddev->native_y_res;
+ }
+ ret = mcde_chnl_set_video_mode(ddev->chnl_state, &channel_video_mode);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode\n", __func__);
+ return ret;
+ }
+
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+
+ return 0;
+}
+
+static inline void mcde_display_get_video_mode_default(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ if (video_mode)
+ *video_mode = ddev->video_mode;
+}
+
+static int mcde_display_set_pixel_format_default(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format)
+{
+ int ret;
+
+ ddev->pixel_format = format;
+ ret = mcde_chnl_set_pixel_format(ddev->chnl_state,
+ ddev->port->pixel_format);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set pixel format = %d\n",
+ __func__, format);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline enum mcde_ovly_pix_fmt mcde_display_get_pixel_format_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->pixel_format;
+}
+
+
+static int mcde_display_set_rotation_default(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ int ret;
+
+ ret = mcde_chnl_set_rotation(ddev->chnl_state, rotation,
+ ddev->rotbuf1, ddev->rotbuf2);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set rotation = %d\n",
+ __func__, rotation);
+ return ret;
+ }
+
+ if (rotation == MCDE_DISPLAY_ROT_180_CCW) {
+ u8 param = 0x40;
+ (void) mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_ADDRESS_MODE, &param, 1);
+ } else if (ddev->rotation == MCDE_DISPLAY_ROT_180_CCW &&
+ rotation != MCDE_DISPLAY_ROT_180_CCW) {
+ u8 param = 0;
+ (void) mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_ADDRESS_MODE, &param, 1);
+ }
+
+ ddev->rotation = rotation;
+ ddev->update_flags |= UPDATE_FLAG_ROTATION;
+
+ return 0;
+}
+
+static inline enum mcde_display_rotation mcde_display_get_rotation_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->rotation;
+}
+
+static int mcde_display_set_synchronized_update_default(
+ struct mcde_display_device *ddev, bool enable)
+{
+ if (ddev->port->type == MCDE_PORTTYPE_DSI && enable) {
+ int ret;
+ u8 m = 0;
+
+ if (ddev->port->sync_src == MCDE_SYNCSRC_OFF)
+ return -EINVAL;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_TEAR_ON, &m, 1);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to set synchornized update = %d\n",
+ __func__, enable);
+ return ret;
+ }
+ }
+ ddev->synchronized_update = enable;
+ return 0;
+}
+
+static inline bool mcde_display_get_synchronized_update_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->synchronized_update;
+}
+
+static int mcde_display_apply_config_default(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ ret = mcde_chnl_enable_synchronized_update(ddev->chnl_state,
+ ddev->synchronized_update);
+
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to enable synchronized update\n",
+ __func__);
+ return ret;
+ }
+
+ if (!ddev->update_flags)
+ return 0;
+
+ if (ddev->update_flags & UPDATE_FLAG_VIDEO_MODE)
+ mcde_chnl_stop_flow(ddev->chnl_state);
+
+ ret = mcde_chnl_apply(ddev->chnl_state);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to apply to channel\n",
+ __func__);
+ return ret;
+ }
+ ddev->update_flags = 0;
+ ddev->first_update = true;
+
+ return 0;
+}
+
+static int mcde_display_invalidate_area_default(
+ struct mcde_display_device *ddev,
+ struct mcde_rectangle *area)
+{
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+ if (area) {
+ /* take union of rects */
+ u16 t;
+ t = min(ddev->update_area.x, area->x);
+ /* note should be > 0 */
+ ddev->update_area.w = max(ddev->update_area.x +
+ ddev->update_area.w,
+ area->x + area->w) - t;
+ ddev->update_area.x = t;
+ t = min(ddev->update_area.y, area->y);
+ ddev->update_area.h = max(ddev->update_area.y +
+ ddev->update_area.h,
+ area->y + area->h) - t;
+ ddev->update_area.y = t;
+ /* TODO: Implement real clipping when partial refresh is
+ activated.*/
+ ddev->update_area.w = min((u16) ddev->video_mode.xres,
+ (u16) ddev->update_area.w);
+ ddev->update_area.h = min((u16) ddev->video_mode.yres,
+ (u16) ddev->update_area.h);
+ } else {
+ ddev->update_area.x = 0;
+ ddev->update_area.y = 0;
+ ddev->update_area.w = ddev->video_mode.xres;
+ ddev->update_area.h = ddev->video_mode.yres;
+ /* Invalidate_area(ddev, NULL) means reset area to empty
+ * rectangle really. After that the rectangle should grow by
+ * taking an union (above). This means that the code should
+ * really look like below, however the code above is a temp fix
+ * for rotation.
+ * TODO: fix
+ * ddev->update_area.x = ddev->video_mode.xres;
+ * ddev->update_area.y = ddev->video_mode.yres;
+ * ddev->update_area.w = 0;
+ * ddev->update_area.h = 0;
+ */
+ }
+
+ return 0;
+}
+
+static int mcde_display_update_default(struct mcde_display_device *ddev,
+ bool tripple_buffer)
+{
+ int ret = 0;
+
+ ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area,
+ tripple_buffer);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to update channel\n", __func__);
+ return ret;
+ }
+ if (ddev->first_update && ddev->on_first_update)
+ ddev->on_first_update(ddev);
+
+ if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) {
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_ON);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to set power mode to on\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ dev_vdbg(&ddev->dev, "Overlay updated, chnl=%d\n", ddev->chnl_id);
+
+ return 0;
+}
+
+static inline int mcde_display_on_first_update_default(
+ struct mcde_display_device *ddev)
+{
+ ddev->first_update = false;
+ return 0;
+}
+
+void mcde_display_init_device(struct mcde_display_device *ddev)
+{
+ /* Setup default callbacks */
+ ddev->get_native_resolution =
+ mcde_display_get_native_resolution_default;
+ ddev->get_default_pixel_format =
+ mcde_display_get_default_pixel_format_default;
+ ddev->get_physical_size = mcde_display_get_physical_size_default;
+ ddev->set_power_mode = mcde_display_set_power_mode_default;
+ ddev->get_power_mode = mcde_display_get_power_mode_default;
+ ddev->try_video_mode = mcde_display_try_video_mode_default;
+ ddev->set_video_mode = mcde_display_set_video_mode_default;
+ ddev->get_video_mode = mcde_display_get_video_mode_default;
+ ddev->set_pixel_format = mcde_display_set_pixel_format_default;
+ ddev->get_pixel_format = mcde_display_get_pixel_format_default;
+ ddev->set_rotation = mcde_display_set_rotation_default;
+ ddev->get_rotation = mcde_display_get_rotation_default;
+ ddev->set_synchronized_update =
+ mcde_display_set_synchronized_update_default;
+ ddev->get_synchronized_update =
+ mcde_display_get_synchronized_update_default;
+ ddev->apply_config = mcde_display_apply_config_default;
+ ddev->invalidate_area = mcde_display_invalidate_area_default;
+ ddev->update = mcde_display_update_default;
+ ddev->on_first_update = mcde_display_on_first_update_default;
+
+ mutex_init(&ddev->display_lock);
+}
+
diff --git a/drivers/video/mcde/mcde_dss.c b/drivers/video/mcde/mcde_dss.c
new file mode 100644
index 00000000000..eb07cf9534b
--- /dev/null
+++ b/drivers/video/mcde/mcde_dss.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display sub system driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <video/mcde_dss.h>
+
+#define to_overlay(x) container_of(x, struct mcde_overlay, kobj)
+
+void overlay_release(struct kobject *kobj)
+{
+ struct mcde_overlay *ovly = to_overlay(kobj);
+
+ kfree(ovly);
+}
+
+struct kobj_type ovly_type = {
+ .release = overlay_release,
+};
+
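+/*
+ * Push overlay configuration to MCDE. Unless force is set, only fields
+ * that differ from the cached ovly->info are written; the dirty rectangle
+ * is forwarded to the display when it implements invalidate_area().
+ */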
+static int apply_overlay(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info, bool force)
+{
+ int ret = 0;
+ if (ovly->ddev->invalidate_area) {
+ /* TODO: transform ovly coord to screen coords (vmode):
+ * add offset
+ */
+ struct mcde_rectangle dirty = info->dirty;
+ mutex_lock(&ovly->ddev->display_lock);
+ ret = ovly->ddev->invalidate_area(ovly->ddev, &dirty);
+ mutex_unlock(&ovly->ddev->display_lock);
+ }
+
+ if (ovly->info.paddr != info->paddr || force)
+ mcde_ovly_set_source_buf(ovly->state, info->paddr);
+
+ if (ovly->info.stride != info->stride || ovly->info.fmt != info->fmt ||
+ force)
+ mcde_ovly_set_source_info(ovly->state, info->stride, info->fmt);
+ if (ovly->info.src_x != info->src_x ||
+ ovly->info.src_y != info->src_y ||
+ ovly->info.w != info->w ||
+ ovly->info.h != info->h || force)
+ mcde_ovly_set_source_area(ovly->state,
+ info->src_x, info->src_y, info->w, info->h);
+ if (ovly->info.dst_x != info->dst_x || ovly->info.dst_y != info->dst_y
+ || ovly->info.dst_z != info->dst_z ||
+ force)
+ mcde_ovly_set_dest_pos(ovly->state,
+ info->dst_x, info->dst_y, info->dst_z);
+
+ mcde_ovly_apply(ovly->state);
+ ovly->info = *info;
+
+ return ret;
+}
+
+/* MCDE DSS operations */
+
+int mcde_dss_open_channel(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_chnl_state *chnl;
+
+ mutex_lock(&ddev->display_lock);
+ /* Acquire MCDE resources */
+ chnl = mcde_chnl_get(ddev->chnl_id, ddev->fifo, ddev->port);
+ if (IS_ERR(chnl)) {
+ ret = PTR_ERR(chnl);
+ dev_warn(&ddev->dev, "Failed to acquire MCDE channel\n");
+ goto chnl_get_failed;
+ }
+ ddev->chnl_state = chnl;
+chnl_get_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_open_channel);
+
+void mcde_dss_close_channel(struct mcde_display_device *ddev)
+{
+ mutex_lock(&ddev->display_lock);
+ mcde_chnl_put(ddev->chnl_state);
+ ddev->chnl_state = NULL;
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_close_channel);
+
+int mcde_dss_enable_display(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (ddev->enabled)
+ return 0;
+
+ mutex_lock(&ddev->display_lock);
+ mcde_chnl_enable(ddev->chnl_state);
+
+ /* Initiate display communication */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "Failed to initialize display\n");
+ goto display_failed;
+ }
+
+ ret = ddev->set_synchronized_update(ddev,
+ ddev->get_synchronized_update(ddev));
+ if (ret < 0)
+ dev_warn(&ddev->dev, "Failed to set sync\n");
+
+ ret = mcde_chnl_enable_synchronized_update(ddev->chnl_state,
+ ddev->synchronized_update);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to enable synchronized update\n",
+ __func__);
+ goto enable_sync_failed;
+ }
+ /* TODO: call driver for all defaults like sync_update above */
+
+ dev_dbg(&ddev->dev, "Display enabled, chnl=%d\n",
+ ddev->chnl_id);
+ ddev->enabled = true;
+ mutex_unlock(&ddev->display_lock);
+
+ return 0;
+
+enable_sync_failed:
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+display_failed:
+ mcde_chnl_disable(ddev->chnl_state);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_enable_display);
+
+void mcde_dss_disable_display(struct mcde_display_device *ddev)
+{
+ if (!ddev->enabled)
+ return;
+
+ /* TODO: Disable overlays */
+ mutex_lock(&ddev->display_lock);
+
+ mcde_chnl_stop_flow(ddev->chnl_state);
+
+ (void)ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ mcde_chnl_disable(ddev->chnl_state);
+
+ ddev->enabled = false;
+ mutex_unlock(&ddev->display_lock);
+
+ dev_dbg(&ddev->dev, "Display disabled, chnl=%d\n", ddev->chnl_id);
+}
+EXPORT_SYMBOL(mcde_dss_disable_display);
+
+int mcde_dss_apply_channel(struct mcde_display_device *ddev)
+{
+ int ret;
+ if (!ddev->apply_config)
+ return -EINVAL;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->apply_config(ddev);
+ mutex_unlock(&ddev->display_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_apply_channel);
+
+struct mcde_overlay *mcde_dss_create_overlay(struct mcde_display_device *ddev,
+ struct mcde_overlay_info *info)
+{
+ struct mcde_overlay *ovly;
+
+ ovly = kzalloc(sizeof(struct mcde_overlay), GFP_KERNEL);
+ if (!ovly)
+ return NULL;
+
+ kobject_init(&ovly->kobj, &ovly_type); /* Local ref */
+ kobject_get(&ovly->kobj); /* Creator ref */
+ INIT_LIST_HEAD(&ovly->list);
+ mutex_lock(&ddev->display_lock);
+ list_add(&ovly->list, &ddev->ovlys);
+ mutex_unlock(&ddev->display_lock);
+ ovly->info = *info;
+ ovly->ddev = ddev;
+
+ return ovly;
+}
+EXPORT_SYMBOL(mcde_dss_create_overlay);
+
+void mcde_dss_destroy_overlay(struct mcde_overlay *ovly)
+{
+ list_del(&ovly->list);
+ if (ovly->state)
+ mcde_dss_disable_overlay(ovly);
+ kobject_put(&ovly->kobj);
+}
+EXPORT_SYMBOL(mcde_dss_destroy_overlay);
+
+int mcde_dss_enable_overlay(struct mcde_overlay *ovly)
+{
+ int ret;
+
+ if (!ovly->ddev->chnl_state)
+ return -EINVAL;
+
+ if (!ovly->state) {
+ struct mcde_ovly_state *state;
+ state = mcde_ovly_get(ovly->ddev->chnl_state);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ dev_warn(&ovly->ddev->dev,
+ "Failed to acquire overlay\n");
+ return ret;
+ }
+ ovly->state = state;
+ }
+
+ apply_overlay(ovly, &ovly->info, true);
+
+ dev_vdbg(&ovly->ddev->dev, "Overlay enabled, chnl=%d\n",
+ ovly->ddev->chnl_id);
+ return 0;
+}
+EXPORT_SYMBOL(mcde_dss_enable_overlay);
+
+int mcde_dss_apply_overlay(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ if (info == NULL)
+ info = &ovly->info;
+ return apply_overlay(ovly, info, false);
+}
+EXPORT_SYMBOL(mcde_dss_apply_overlay);
+
+void mcde_dss_disable_overlay(struct mcde_overlay *ovly)
+{
+ if (!ovly->state)
+ return;
+
+ mcde_ovly_put(ovly->state);
+
+ dev_dbg(&ovly->ddev->dev, "Overlay disabled, chnl=%d\n",
+ ovly->ddev->chnl_id);
+
+ ovly->state = NULL;
+}
+EXPORT_SYMBOL(mcde_dss_disable_overlay);
+
+int mcde_dss_update_overlay(struct mcde_overlay *ovly, bool tripple_buffer)
+{
+ int ret;
+ dev_vdbg(&ovly->ddev->dev, "Overlay update, chnl=%d\n",
+ ovly->ddev->chnl_id);
+
+ if (!ovly->state || !ovly->ddev->update || !ovly->ddev->invalidate_area)
+ return -EINVAL;
+
+ mutex_lock(&ovly->ddev->display_lock);
+ /* Do not perform an update if power mode is off */
+ if (ovly->ddev->get_power_mode(ovly->ddev) == MCDE_DISPLAY_PM_OFF) {
+ ret = 0;
+ goto power_mode_off;
+ }
+
+ ret = ovly->ddev->update(ovly->ddev, tripple_buffer);
+ if (ret)
+ goto update_failed;
+
+ ret = ovly->ddev->invalidate_area(ovly->ddev, NULL);
+
+power_mode_off:
+update_failed:
+ mutex_unlock(&ovly->ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_update_overlay);
+
+void mcde_dss_get_overlay_info(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ if (info)
+ *info = ovly->info;
+}
+EXPORT_SYMBOL(mcde_dss_get_overlay_info);
+
+void mcde_dss_get_native_resolution(struct mcde_display_device *ddev,
+ u16 *x_res, u16 *y_res)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_native_resolution(ddev, x_res, y_res);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_native_resolution);
+
+enum mcde_ovly_pix_fmt mcde_dss_get_default_pixel_format(
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_default_pixel_format(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_default_pixel_format);
+
+void mcde_dss_get_physical_size(struct mcde_display_device *ddev,
+ u16 *physical_width, u16 *physical_height)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_physical_size(ddev, physical_width, physical_height);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_physical_size);
+
+int mcde_dss_try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->try_video_mode(ddev, video_mode);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_try_video_mode);
+
+int mcde_dss_set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *vmode)
+{
+ int ret;
+ struct mcde_video_mode old_vmode;
+
+ mutex_lock(&ddev->display_lock);
+ ddev->get_video_mode(ddev, &old_vmode);
+ if (memcmp(vmode, &old_vmode, sizeof(old_vmode)) == 0) {
+ ret = 0;
+ goto same_video_mode;
+ }
+
+ ret = ddev->set_video_mode(ddev, vmode);
+ if (ret)
+ goto set_video_mode_failed;
+
+ if (ddev->invalidate_area)
+ ret = ddev->invalidate_area(ddev, NULL);
+same_video_mode:
+set_video_mode_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_video_mode);
+
+void mcde_dss_get_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_video_mode(ddev, video_mode);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_video_mode);
+
+int mcde_dss_set_pixel_format(struct mcde_display_device *ddev,
+ enum mcde_ovly_pix_fmt pix_fmt)
+{
+ enum mcde_ovly_pix_fmt old_pix_fmt;
+ int ret;
+
+ mutex_lock(&ddev->display_lock);
+ old_pix_fmt = ddev->get_pixel_format(ddev);
+ if (old_pix_fmt == pix_fmt) {
+ ret = 0;
+ goto same_pixel_format;
+ }
+
+ ret = ddev->set_pixel_format(ddev, pix_fmt);
+
+same_pixel_format:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_pixel_format);
+
+int mcde_dss_get_pixel_format(struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_pixel_format(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_pixel_format);
+
+int mcde_dss_set_rotation(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ int ret;
+ enum mcde_display_rotation old_rotation;
+
+ mutex_lock(&ddev->display_lock);
+ old_rotation = ddev->get_rotation(ddev);
+ if (old_rotation == rotation) {
+ ret = 0;
+ goto same_rotation;
+ }
+
+ ret = ddev->set_rotation(ddev, rotation);
+same_rotation:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_rotation);
+
+enum mcde_display_rotation mcde_dss_get_rotation(
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_rotation(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_rotation);
+
+int mcde_dss_set_synchronized_update(struct mcde_display_device *ddev,
+ bool enable)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->set_synchronized_update(ddev, enable);
+ if (ret)
+ goto sync_update_failed;
+
+ if (ddev->chnl_state)
+ mcde_chnl_enable_synchronized_update(ddev->chnl_state, enable);
+ mutex_unlock(&ddev->display_lock);
+ return 0;
+
+sync_update_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_synchronized_update);
+
+bool mcde_dss_get_synchronized_update(struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_synchronized_update(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_synchronized_update);
+
+int __init mcde_dss_init(void)
+{
+ return 0;
+}
+
+void mcde_dss_exit(void)
+{
+}
+
diff --git a/drivers/video/mcde/mcde_fb.c b/drivers/video/mcde/mcde_fb.c
new file mode 100644
index 00000000000..7d877ec3e4f
--- /dev/null
+++ b/drivers/video/mcde/mcde_fb.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE frame buffer driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/hwmem.h>
+#include <linux/io.h>
+
+#include <linux/console.h>
+
+#include <video/mcde_fb.h>
+
+#define MCDE_FB_BPP_MAX 16
+#define MCDE_FB_VXRES_MAX 1920
+#define MCDE_FB_VYRES_MAX 2160
+
+static struct fb_ops fb_ops;
+
+struct pix_fmt_info {
+ enum mcde_ovly_pix_fmt pix_fmt;
+
+ u32 bpp;
+ struct fb_bitfield r;
+ struct fb_bitfield g;
+ struct fb_bitfield b;
+ struct fb_bitfield a;
+ u32 nonstd;
+};
+
+struct pix_fmt_info pix_fmt_map[] = {
+ {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGB565,
+ .bpp = 16,
+ .r = { .offset = 11, .length = 5 },
+ .g = { .offset = 5, .length = 6 },
+ .b = { .offset = 0, .length = 5 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA5551,
+ .bpp = 16,
+ .r = { .offset = 11, .length = 5 },
+ .g = { .offset = 6, .length = 5 },
+ .b = { .offset = 1, .length = 5 },
+ .a = { .offset = 0, .length = 1 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA4444,
+ .bpp = 16,
+ .r = { .offset = 12, .length = 4 },
+ .g = { .offset = 8, .length = 4 },
+ .b = { .offset = 4, .length = 4 },
+ .a = { .offset = 0, .length = 4 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_YCbCr422,
+ .bpp = 16,
+ .nonstd = MCDE_OVLYPIXFMT_YCbCr422,
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGB888,
+ .bpp = 24,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA8888,
+ .bpp = 32,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ .a = { .offset = 24, .length = 8 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBX8888,
+ .bpp = 32,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ }
+
+};
+
+static struct platform_device mcde_fb_device = {
+ .name = "mcde_fb",
+ .id = -1,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void early_suspend(struct early_suspend *data)
+{
+ int i;
+ struct mcde_fb *mfb =
+ container_of(data, struct mcde_fb, early_suspend);
+
+ console_lock();
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i] && mfb->ovlys[i]->ddev &&
+ !mfb->ovlys[i]->ddev->stay_alive)
+ mcde_dss_disable_display(mfb->ovlys[i]->ddev);
+ }
+ console_unlock();
+}
+
+static void late_resume(struct early_suspend *data)
+{
+ int i;
+ struct mcde_fb *mfb =
+ container_of(data, struct mcde_fb, early_suspend);
+
+ console_lock();
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i]) {
+ struct mcde_overlay *ovly = mfb->ovlys[i];
+ (void) mcde_dss_enable_display(ovly->ddev);
+ }
+ }
+ console_unlock();
+}
+#endif
+
+/* Helpers */
+
+static struct pix_fmt_info *find_pix_fmt_info(enum mcde_ovly_pix_fmt pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ if (pix_fmt_map[i].pix_fmt == pix_fmt)
+ return &pix_fmt_map[i];
+ }
+ return NULL;
+}
+
+static bool bitfield_cmp(struct fb_bitfield *bf1, struct fb_bitfield *bf2)
+{
+ return bf1->offset == bf2->offset &&
+ bf1->length == bf2->length &&
+ bf1->msb_right == bf2->msb_right;
+}
+
+static struct pix_fmt_info *var_to_pix_fmt_info(struct fb_var_screeninfo *var)
+{
+ int i;
+ struct pix_fmt_info *info;
+
+ if (var->nonstd)
+ return find_pix_fmt_info(var->nonstd);
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ info = &pix_fmt_map[i];
+ if (info->bpp == var->bits_per_pixel &&
+ bitfield_cmp(&info->r, &var->red) &&
+ bitfield_cmp(&info->g, &var->green) &&
+ bitfield_cmp(&info->b, &var->blue) &&
+ bitfield_cmp(&info->a, &var->transp))
+ return info;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ info = &pix_fmt_map[i];
+ if (var->bits_per_pixel == info->bpp)
+ return info;
+ }
+
+ return NULL;
+}
+
+static void pix_fmt_info_to_var(struct pix_fmt_info *pix_fmt_info,
+ struct fb_var_screeninfo *var)
+{
+ var->bits_per_pixel = pix_fmt_info->bpp;
+ var->nonstd = pix_fmt_info->nonstd;
+ var->red = pix_fmt_info->r;
+ var->green = pix_fmt_info->g;
+ var->blue = pix_fmt_info->b;
+ var->transp = pix_fmt_info->a;
+}
+
+static int init_var_fmt(struct fb_var_screeninfo *var,
+ u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt,
+ u32 rotate)
+{
+ struct pix_fmt_info *info;
+
+ info = find_pix_fmt_info(pix_fmt);
+ if (!info)
+ return -EINVAL;
+
+ var->bits_per_pixel = info->bpp;
+ var->nonstd = info->nonstd;
+ var->red = info->r;
+ var->green = info->g;
+ var->blue = info->b;
+ var->transp = info->a;
+ var->grayscale = false;
+
+ var->xres = w;
+ var->yres = h;
+ var->xres_virtual = vw;
+ var->yres_virtual = vh;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->activate = FB_ACTIVATE_NOW;
+ var->rotate = rotate;
+
+ return 0;
+}
+
+static int reallocate_fb_mem(struct fb_info *fbi, u32 size)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ void *vaddr;
+ struct hwmem_alloc *alloc;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t num_mem_chunks = 1;
+ int name;
+
+ size = PAGE_ALIGN(size);
+
+ if (size == fbi->screen_size)
+ return 0;
+
+/* TODO: Remove once hwmem has support for defragmentation */
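+/*
+ * With CONFIG_MCDE_FB_AVOID_REALLOC the frame buffer is allocated once at
+ * the maximum size given by MCDE_FB_BPP_MAX, MCDE_FB_VXRES_MAX and
+ * MCDE_FB_VYRES_MAX, and then reused for every later mode change.
+ */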
+#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
+ if (!mfb->alloc) {
+ u32 old_size = size;
+
+ size = MCDE_FB_BPP_MAX / 8 * MCDE_FB_VXRES_MAX *
+ MCDE_FB_VYRES_MAX;
+#endif
+
+ alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED,
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ name = hwmem_get_name(alloc);
+ if (name < 0) {
+ hwmem_release(alloc);
+ return name;
+ }
+
+ if (mfb->alloc) {
+ hwmem_kunmap(mfb->alloc);
+ hwmem_unpin(mfb->alloc);
+ hwmem_release(mfb->alloc);
+ }
+
+ (void)hwmem_pin(alloc, &mem_chunk, &num_mem_chunks);
+
+ vaddr = hwmem_kmap(alloc);
+ if (vaddr == NULL) {
+ hwmem_unpin(alloc);
+ hwmem_release(alloc);
+ return -ENOMEM;
+ }
+
+ mfb->alloc = alloc;
+ mfb->alloc_name = name;
+
+ fbi->screen_base = vaddr;
+ fbi->fix.smem_start = mem_chunk.paddr;
+
+#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
+ size = old_size;
+ }
+#endif
+
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+}
+
+static void free_fb_mem(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (mfb->alloc) {
+ hwmem_kunmap(mfb->alloc);
+ hwmem_unpin(mfb->alloc);
+ hwmem_release(mfb->alloc);
+ mfb->alloc = NULL;
+ mfb->alloc_name = 0;
+
+ fbi->fix.smem_start = 0;
+ fbi->fix.smem_len = 0;
+ fbi->screen_base = NULL;
+ fbi->screen_size = 0;
+ }
+}
+
+static void init_fb(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ strlcpy(fbi->fix.id, "mcde_fb", sizeof(fbi->fix.id));
+ fbi->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbi->fix.visual = FB_VISUAL_DIRECTCOLOR;
+ fbi->fix.xpanstep = 1;
+ fbi->fix.ypanstep = 1;
+ fbi->flags = FBINFO_HWACCEL_DISABLED;
+ fbi->fbops = &fb_ops;
+ fbi->pseudo_palette = &mfb->pseudo_palette[0];
+}
+
+static void get_ovly_info(struct fb_info *fbi, struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ memset(info, 0, sizeof(*info));
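+ /* The pan offset (var.yoffset) selects which buffer is scanned out */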
+ info->paddr = fbi->fix.smem_start +
+ fbi->fix.line_length * fbi->var.yoffset;
+ info->vaddr = (u32 *)(fbi->screen_base +
+ fbi->fix.line_length * fbi->var.yoffset);
+ /* TODO: move mem check to check_var/pan_display */
+ if (info->paddr + fbi->fix.line_length * fbi->var.yres >
+ fbi->fix.smem_start + fbi->fix.smem_len) {
+ info->paddr = fbi->fix.smem_start;
+ info->vaddr = (u32 *)fbi->screen_base;
+ }
+ info->fmt = mfb->pix_fmt;
+ info->stride = fbi->fix.line_length;
+ if (ovly) {
+ info->src_x = ovly->info.src_x;
+ info->src_y = ovly->info.src_y;
+ info->dst_x = ovly->info.dst_x;
+ info->dst_y = ovly->info.dst_y;
+ info->dst_z = 1;
+ } else {
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = 0;
+ info->dst_y = 0;
+ info->dst_z = 1;
+ }
+ info->w = fbi->var.xres;
+ info->h = fbi->var.yres;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = fbi->var.xres;
+ info->dirty.h = fbi->var.yres;
+}
+
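+/*
+ * Translate between fbdev timings and MCDE video modes: upper/lower margin
+ * map to vertical back/front porch, left/right margin to horizontal
+ * back/front porch, and the sync lengths map directly.
+ */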
+void vmode_to_var(struct mcde_video_mode *video_mode,
+ struct fb_var_screeninfo *var)
+{
+ /* TODO: use only 1 vbp and 1 vfp */
+ var->xres = video_mode->xres;
+ var->yres = video_mode->yres;
+ var->pixclock = video_mode->pixclock;
+ var->upper_margin = video_mode->vbp;
+ var->lower_margin = video_mode->vfp;
+ var->vsync_len = video_mode->vsw;
+ var->left_margin = video_mode->hbp;
+ var->right_margin = video_mode->hfp;
+ var->hsync_len = video_mode->hsw;
+ var->vmode &= ~FB_VMODE_INTERLACED;
+ var->vmode |= video_mode->interlaced ?
+ FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED;
+}
+
+void var_to_vmode(struct fb_var_screeninfo *var,
+ struct mcde_video_mode *video_mode)
+{
+ video_mode->xres = var->xres;
+ video_mode->yres = var->yres;
+ video_mode->pixclock = var->pixclock;
+ video_mode->vbp = var->upper_margin;
+ video_mode->vfp = var->lower_margin;
+ video_mode->vsw = var->vsync_len;
+ video_mode->hbp = var->left_margin;
+ video_mode->hfp = var->right_margin;
+ video_mode->hsw = var->hsync_len;
+ video_mode->interlaced = (var->vmode & FB_VMODE_INTERLACED) ==
+ FB_VMODE_INTERLACED;
+}
+
+enum mcde_display_rotation var_to_rotation(struct fb_var_screeninfo *var)
+{
+ enum mcde_display_rotation rot;
+
+ switch (var->rotate) {
+ case FB_ROTATE_UR:
+ rot = MCDE_DISPLAY_ROT_0;
+ break;
+ case FB_ROTATE_CW:
+ rot = MCDE_DISPLAY_ROT_90_CW;
+ break;
+ case FB_ROTATE_UD:
+ rot = MCDE_DISPLAY_ROT_180_CW;
+ break;
+ case FB_ROTATE_CCW:
+ rot = MCDE_DISPLAY_ROT_90_CCW;
+ break;
+ default:
+ rot = MCDE_DISPLAY_ROT_0;
+ break;
+ }
+ dev_vdbg(&mcde_fb_device.dev, "var_rot: %d -> mcde_rot: %d\n",
+ var->rotate, rot);
+ return rot;
+}
+
+static struct mcde_display_device *fb_to_display(struct fb_info *fbi)
+{
+ int i;
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i])
+ return mfb->ovlys[i]->ddev;
+ }
+ return NULL;
+}
+
+static int check_var(struct fb_var_screeninfo *var, struct fb_info *fbi,
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ u16 w = -1, h = -1;
+ struct mcde_video_mode vmode;
+ struct pix_fmt_info *fmtinfo;
+
+ /* TODO: check sizes/offsets/memory validity */
+
+ /* Device physical size */
+ mcde_dss_get_physical_size(ddev, &w, &h);
+ var->width = w;
+ var->height = h;
+
+ /* Rotation */
+ if (var->rotate > 3) {
+ dev_info(&(ddev->dev), "check_var failed var->rotate\n");
+ return -EINVAL;
+ }
+
+ /* Video mode */
+ var_to_vmode(var, &vmode);
+ ret = mcde_dss_try_video_mode(ddev, &vmode);
+ if (ret < 0) {
+ dev_vdbg(&(ddev->dev), "check_var failed "
+ "mcde_dss_try_video_mode with size = %x\n", ret);
+ return ret;
+ }
+ vmode_to_var(&vmode, var);
+
+ /* Pixel format */
+ fmtinfo = var_to_pix_fmt_info(var);
+ if (!fmtinfo) {
+ dev_vdbg(&(ddev->dev), "check_var failed fmtinfo\n");
+ return -EINVAL;
+ }
+ pix_fmt_info_to_var(fmtinfo, var);
+
+ /* Not used */
+ var->grayscale = 0;
+ var->sync = 0;
+
+ return 0;
+}
+
+static int apply_var(struct fb_info *fbi, struct mcde_display_device *ddev)
+{
+ int ret, i;
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct fb_var_screeninfo *var;
+ struct mcde_video_mode vmode;
+ struct pix_fmt_info *fmt;
+ u32 line_len, size;
+
+ if (!ddev)
+ return -ENODEV;
+
+ dev_vdbg(&(ddev->dev), "%s\n", __func__);
+
+ var = &fbi->var;
+
+ ddev->check_transparency = 60;
+
+ /* Reallocate memory */
+ line_len = (fbi->var.bits_per_pixel * var->xres_virtual) / 8;
+ line_len = ALIGN(line_len, MCDE_BUF_LINE_ALIGMENT);
+ size = line_len * var->yres_virtual;
+ ret = reallocate_fb_mem(fbi, size);
+ if (ret) {
+ dev_vdbg(&(ddev->dev), "apply_var failed with"
+ "reallocate mem with size = %d\n", size);
+ return ret;
+ }
+ fbi->fix.line_length = line_len;
+
+ if (ddev->fictive)
+ goto apply_var_end;
+
+ /* Apply pixel format */
+ fmt = var_to_pix_fmt_info(var);
+ mfb->pix_fmt = fmt->pix_fmt;
+
+ /* Apply rotation */
+ mcde_dss_set_rotation(ddev, var_to_rotation(var));
+ /* Apply video mode */
+ memset(&vmode, 0, sizeof(struct mcde_video_mode));
+ var_to_vmode(var, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ return ret;
+
+ mcde_dss_apply_channel(ddev);
+
+ /* Apply overlay info */
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ struct mcde_overlay *ovly = mfb->ovlys[i];
+ struct mcde_overlay_info info;
+ int num_buffers;
+
+ get_ovly_info(fbi, ovly, &info);
+ (void) mcde_dss_apply_overlay(ovly, &info);
+
+ num_buffers = var->yres_virtual / var->yres;
+ mcde_dss_update_overlay(ovly, num_buffers == 3);
+ }
+
+apply_var_end:
+ return 0;
+}
+
+/* FB ops */
+
+static int mcde_fb_open(struct fb_info *fbi, int user)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int mcde_fb_release(struct fb_info *fbi, int user)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int mcde_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (!ddev) {
+ printk(KERN_ERR "mcde_fb_check_var failed !ddev\n");
+ return -ENODEV;
+ }
+
+ return check_var(var, fbi, ddev);
+}
+
+static int mcde_fb_set_par(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (mfb->ovlys[0]->state == NULL &&
+ ddev->fictive == false) {
+ printk(KERN_INFO "%s() - Enable fb %p\n",
+ __func__,
+ mfb->ovlys[0]);
+ mcde_dss_enable_overlay(mfb->ovlys[0]);
+ }
+
+ return apply_var(fbi, ddev);
+}
+
+static int mcde_fb_blank(int blank, struct fb_info *fbi)
+{
+ int ret = 0;
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (ddev->fictive)
+ goto mcde_fb_blank_end;
+
+ switch (blank) {
+ case FB_BLANK_NORMAL:
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ mcde_dss_disable_display(ddev);
+ break;
+ case FB_BLANK_UNBLANK:
+ ret = mcde_dss_enable_display(ddev);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+mcde_fb_blank_end:
+ return ret;
+}
+
+static int mcde_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (var->xoffset == fbi->var.xoffset &&
+ var->yoffset == fbi->var.yoffset)
+ return 0;
+
+ fbi->var.xoffset = var->xoffset;
+ fbi->var.yoffset = var->yoffset;
+ return apply_var(fbi, fb_to_display(fbi));
+}
+
+static void mcde_fb_rotate(struct fb_info *fbi, int rotate)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+}
+
+static int mcde_fb_ioctl(struct fb_info *fbi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (cmd == MCDE_GET_BUFFER_NAME_IOC)
+ return mfb->alloc_name;
+
+ return -EINVAL;
+}
+
+static int mcde_fb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue, unsigned int transp,
+ struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (regno >= 256)
+ return 1;
+
+ if (regno < 17) {
+ u32 pseudo_val;
+ u32 r, g, b;
+
+ if (fbi->var.bits_per_pixel > 16) {
+ r = red >> 8;
+ g = green >> 8;
+ b = blue >> 8;
+ } else if (fbi->var.bits_per_pixel == 16) {
+ r = red >> (3 + 8);
+ g = green >> (2 + 8);
+ b = blue >> (3 + 8);
+ } else if (fbi->var.bits_per_pixel == 15) {
+ r = red >> (3 + 8);
+ g = green >> (3 + 8);
+ b = blue >> (3 + 8);
+ } else
+ r = b = g = (regno & 15);
+ pseudo_val = r << fbi->var.red.offset;
+ pseudo_val |= g << fbi->var.green.offset;
+ pseudo_val |= b << fbi->var.blue.offset;
+
+ ((u32 *)fbi->pseudo_palette)[regno] = pseudo_val;
+ }
+
+ return 0;
+}
+
+static int mcde_fb_setcmap(struct fb_cmap *cmap, struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+	/* Nothing to see here, move along */
+ return 0;
+}
+
+static struct fb_ops fb_ops = {
+ /* creg, cmap */
+ .owner = THIS_MODULE,
+ .fb_open = mcde_fb_open,
+ .fb_release = mcde_fb_release,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = mcde_fb_check_var,
+ .fb_set_par = mcde_fb_set_par,
+ .fb_blank = mcde_fb_blank,
+ .fb_pan_display = mcde_fb_pan_display,
+ .fb_rotate = mcde_fb_rotate,
+ .fb_ioctl = mcde_fb_ioctl,
+ .fb_setcolreg = mcde_fb_setcolreg,
+ .fb_setcmap = mcde_fb_setcmap,
+};
+
+/* FB driver */
+
+struct fb_info *mcde_fb_create(struct mcde_display_device *ddev,
+ u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt,
+ u32 rotate)
+{
+ int ret = 0;
+ struct fb_info *fbi;
+ struct mcde_fb *mfb;
+ struct mcde_overlay *ovly = NULL;
+ struct mcde_overlay_info ovly_info;
+
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+ if (!ddev->initialized) {
+ dev_warn(&ddev->dev, "%s: Device not initialized\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Init fb */
+ fbi = framebuffer_alloc(sizeof(struct mcde_fb), &mcde_fb_device.dev);
+ if (fbi == NULL) {
+ ret = -ENOMEM;
+ goto fb_alloc_failed;
+ }
+ init_fb(fbi);
+ mfb = to_mcde_fb(fbi);
+
+ if (ddev->fictive == false) {
+ ret = mcde_dss_open_channel(ddev);
+ if (ret)
+ goto channel_open_failed;
+
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto display_enable_failed;
+ }
+
+ /* Prepare var and allocate frame buffer memory */
+ init_var_fmt(&fbi->var, w, h, vw, vh, pix_fmt, rotate);
+ check_var(&fbi->var, fbi, ddev);
+ ret = apply_var(fbi, ddev);
+ if (ret)
+ goto apply_var_failed;
+
+ if (ddev->fictive == false)
+ mcde_dss_set_pixel_format(ddev, ddev->port->pixel_format);
+
+ /* Setup overlay */
+ get_ovly_info(fbi, NULL, &ovly_info);
+ ovly = mcde_dss_create_overlay(ddev, &ovly_info);
+ if (!ovly) {
+		ret = -ENOMEM;
+ goto ovly_alloc_failed;
+ }
+ mfb->ovlys[0] = ovly;
+ mfb->num_ovlys = 1;
+
+ if (ddev->fictive == false) {
+ ret = mcde_dss_enable_overlay(ovly);
+ if (ret)
+ goto ovly_enable_failed;
+ }
+
+ mfb->id = ddev->id;
+
+ /* Register framebuffer */
+ ret = register_framebuffer(fbi);
+ if (ret)
+ goto fb_register_failed;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret)
+		dev_warn(&ddev->dev, "%s: Failed to allocate color map memory\n",
+			__func__);
+
+ ddev->fbi = fbi;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (ddev->fictive == false) {
+ mfb->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ mfb->early_suspend.suspend = early_suspend;
+ mfb->early_suspend.resume = late_resume;
+ register_early_suspend(&mfb->early_suspend);
+ }
+#endif
+
+ goto out;
+fb_register_failed:
+ mcde_dss_disable_overlay(ovly);
+ovly_enable_failed:
+ mcde_dss_destroy_overlay(ovly);
+ovly_alloc_failed:
+ free_fb_mem(fbi);
+apply_var_failed:
+ mcde_dss_disable_display(ddev);
+display_enable_failed:
+ mcde_dss_close_channel(ddev);
+channel_open_failed:
+ framebuffer_release(fbi);
+ fbi = NULL;
+fb_alloc_failed:
+out:
+ return ret ? ERR_PTR(ret) : fbi;
+}
+EXPORT_SYMBOL(mcde_fb_create);
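+/*
+ * Typical use (illustrative only, values are hypothetical): display setup
+ * code creates one framebuffer per display device, e.g.
+ *	fbi = mcde_fb_create(ddev, 480, 854, 480, 854 * 2,
+ *			MCDE_OVLYPIXFMT_RGBA8888, FB_ROTATE_UR);
+ * where vh == 2 * h gives a double-buffered frame buffer.
+ */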
+
+int mcde_fb_attach_overlay(struct fb_info *fb_info, struct mcde_overlay *ovl)
+{
+ /* TODO: Attach extra overlay targets */
+ return -EINVAL;
+}
+
+void mcde_fb_destroy(struct mcde_display_device *dev)
+{
+ struct mcde_fb *mfb;
+ int i;
+
+ dev_vdbg(&dev->dev, "%s\n", __func__);
+
+ if (dev->fictive == false) {
+ mcde_dss_disable_display(dev);
+ mcde_dss_close_channel(dev);
+ }
+
+ mfb = to_mcde_fb(dev->fbi);
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i])
+ mcde_dss_destroy_overlay(mfb->ovlys[i]);
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (dev->fictive == false)
+ unregister_early_suspend(&mfb->early_suspend);
+#endif
+ fb_dealloc_cmap(&dev->fbi->cmap);
+
+ unregister_framebuffer(dev->fbi);
+ free_fb_mem(dev->fbi);
+ framebuffer_release(dev->fbi);
+ dev->fbi = NULL;
+}
+
+/* Overlay fbs' platform device */
+static int mcde_fb_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int mcde_fb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver mcde_fb_driver = {
+ .probe = mcde_fb_probe,
+ .remove = mcde_fb_remove,
+ .driver = {
+ .name = "mcde_fb",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* MCDE fb init */
+
+int __init mcde_fb_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mcde_fb_driver);
+ if (ret)
+ goto fb_driver_failed;
+ ret = platform_device_register(&mcde_fb_device);
+ if (ret)
+ goto fb_device_failed;
+
+ goto out;
+fb_device_failed:
+ platform_driver_unregister(&mcde_fb_driver);
+fb_driver_failed:
+out:
+ return ret;
+}
+
+void mcde_fb_exit(void)
+{
+ platform_device_unregister(&mcde_fb_device);
+ platform_driver_unregister(&mcde_fb_driver);
+}
diff --git a/drivers/video/mcde/mcde_hw.c b/drivers/video/mcde/mcde_hw.c
new file mode 100644
index 00000000000..164bead10b5
--- /dev/null
+++ b/drivers/video/mcde/mcde_hw.c
@@ -0,0 +1,3522 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include <video/mcde.h>
+#include "dsilink_regs.h"
+#include "mcde_regs.h"
+#include "mcde_debugfs.h"
+
+
+/* MCDE channel states
+ *
+ * Allowed state transitions:
+ * IDLE <-> SUSPEND
+ * IDLE <-> DSI_READ
+ * IDLE <-> DSI_WRITE
+ * IDLE -> SETUP -> (WAIT_TE ->) RUNNING -> STOPPING -> STOPPED -> IDLE
+ * WAIT_TE -> STOPPED (for missing TE to allow re-enable)
+ */
+enum chnl_state {
+ CHNLSTATE_SUSPEND, /* HW in suspended mode, initial state */
+	CHNLSTATE_IDLE,      /* Channel acquired, but not running, FLOEN==0 */
+ CHNLSTATE_DSI_READ, /* Executing DSI read */
+ CHNLSTATE_DSI_WRITE, /* Executing DSI write */
+ CHNLSTATE_SETUP, /* Channel register setup to prepare for running */
+ CHNLSTATE_WAIT_TE, /* Waiting for BTA or external TE */
+ CHNLSTATE_RUNNING, /* Update started, FLOEN=1, FLOEN==1 */
+ CHNLSTATE_STOPPING, /* Stopping, FLOEN=0, FLOEN==1, awaiting VCMP */
+ CHNLSTATE_STOPPED, /* Stopped, after VCMP, FLOEN==0|1 */
+};
+
+static int set_channel_state_atomic(struct mcde_chnl_state *chnl,
+ enum chnl_state state);
+static int set_channel_state_sync(struct mcde_chnl_state *chnl,
+ enum chnl_state state);
+static void stop_channel(struct mcde_chnl_state *chnl);
+static int _mcde_chnl_enable(struct mcde_chnl_state *chnl);
+static int _mcde_chnl_apply(struct mcde_chnl_state *chnl);
+static void disable_flow(struct mcde_chnl_state *chnl);
+static void enable_flow(struct mcde_chnl_state *chnl);
+static void do_softwaretrig(struct mcde_chnl_state *chnl);
+static void dsi_te_poll_req(struct mcde_chnl_state *chnl);
+static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl,
+ unsigned int timeout);
+static void dsi_te_timer_function(unsigned long value);
+static int wait_for_vcmp(struct mcde_chnl_state *chnl);
+static void probe_hw(void);
+static void wait_for_flow_disabled(struct mcde_chnl_state *chnl);
+
+#define OVLY_TIMEOUT 100
+#define CHNL_TIMEOUT 100
+#define FLOW_STOP_TIMEOUT 20
+#define SCREEN_PPL_HIGH 1920
+#define SCREEN_PPL_CEA2 720
+#define SCREEN_LPF_CEA2 480
+#define DSI_DELAY0_CEA2_ADD 10
+
+#define MCDE_SLEEP_WATCHDOG 500
+#define DSI_TE_NO_ANSWER_TIMEOUT_INIT 2500
+#define DSI_TE_NO_ANSWER_TIMEOUT 250
+#define DSI_READ_TIMEOUT 200
+#define DSI_WRITE_CMD_TIMEOUT 1000
+#define DSI_READ_DELAY 5
+#define DSI_READ_NBR_OF_RETRIES 2
+#define MCDE_FLOWEN_MAX_TRIAL 60
+
+static u8 *mcdeio;
+static u8 **dsiio;
+static struct platform_device *mcde_dev;
+static u8 num_dsilinks;
+static u8 num_channels;
+static u8 num_overlays;
+static u8 hardware_version;
+static int mcde_irq;
+
+static struct regulator *regulator_vana;
+static struct regulator *regulator_mcde_epod;
+static struct regulator *regulator_esram_epod;
+static struct clk *clock_dpi;
+static struct clk *clock_dsi;
+static struct clk *clock_mcde;
+static struct clk *clock_dsi_lp;
+static u8 mcde_is_enabled;
+static struct delayed_work hw_timeout_work;
+static u8 dsi_pll_is_enabled;
+
+static struct mutex mcde_hw_lock;
+static inline void mcde_lock(const char *func, int line)
+{
+ mutex_lock(&mcde_hw_lock);
+ dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line);
+}
+
+static inline void mcde_unlock(const char *func, int line)
+{
+ dev_vdbg(&mcde_dev->dev, "Exit MCDE: %s:%d\n", func, line);
+ mutex_unlock(&mcde_hw_lock);
+}
+
+static inline bool mcde_trylock(const char *func, int line)
+{
+ bool locked = mutex_trylock(&mcde_hw_lock) == 1;
+ if (locked)
+ dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line);
+ return locked;
+}
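+/*
+ * Usage (illustrative): callers bracket hardware access with
+ *	mcde_lock(__func__, __LINE__);
+ *	... access MCDE registers ...
+ *	mcde_unlock(__func__, __LINE__);
+ * see e.g. work_sleep_function() below.
+ */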
+
+static u8 mcde_dynamic_power_management = true;
+
+static inline u32 dsi_rreg(int i, u32 reg)
+{
+ return readl(dsiio[i] + reg);
+}
+static inline void dsi_wreg(int i, u32 reg, u32 val)
+{
+ writel(val, dsiio[i] + reg);
+}
+
+#define dsi_rfld(__i, __reg, __fld) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ ((dsi_rreg(__i, __reg) & mask) >> shift); \
+})
+
+#define dsi_wfld(__i, __reg, __fld, __val) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ const u32 oldval = dsi_rreg(__i, __reg); \
+ const u32 newval = ((__val) << shift); \
+ dsi_wreg(__i, __reg, (oldval & ~mask) | (newval & mask)); \
+})
+
+static inline u32 mcde_rreg(u32 reg)
+{
+ return readl(mcdeio + reg);
+}
+static inline void mcde_wreg(u32 reg, u32 val)
+{
+ writel(val, mcdeio + reg);
+}
+
+
+#define mcde_rfld(__reg, __fld) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ ((mcde_rreg(__reg) & mask) >> shift); \
+})
+
+#define mcde_wfld(__reg, __fld, __val) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ const u32 oldval = mcde_rreg(__reg); \
+ const u32 newval = ((__val) << shift); \
+ mcde_wreg(__reg, (oldval & ~mask) | (newval & mask)); \
+})
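+/*
+ * Example (illustrative): mcde_wfld(MCDE_CRA0, FLOEN, true) performs a
+ * read-modify-write of MCDE_CRA0: the MCDE_CRA0_FLOEN_MASK bits are cleared
+ * and (true << MCDE_CRA0_FLOEN_SHIFT) is ORed in, so only the FLOEN field
+ * is changed.
+ */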
+
+struct ovly_regs {
+ bool enabled;
+ bool dirty;
+ bool dirty_buf;
+
+ u8 ch_id;
+ u32 baseaddress0;
+ u32 baseaddress1;
+ u8 bits_per_pixel;
+ u8 bpp;
+ bool bgr;
+ bool bebo;
+ bool opq;
+ u8 col_conv;
+ u8 alpha_source;
+ u8 alpha_value;
+ u8 pixoff;
+ u16 ppl;
+ u16 lpf;
+ u16 cropx;
+ u16 cropy;
+ u16 xpos;
+ u16 ypos;
+ u8 z;
+};
+
+struct mcde_ovly_state {
+ bool inuse;
+ u8 idx; /* MCDE overlay index */
+ struct mcde_chnl_state *chnl; /* Owner channel */
+ bool dirty;
+ bool dirty_buf;
+
+ /* Staged settings */
+ u32 paddr;
+ u16 stride;
+ enum mcde_ovly_pix_fmt pix_fmt;
+
+ u16 src_x;
+ u16 src_y;
+ u16 dst_x;
+ u16 dst_y;
+ u16 dst_z;
+ u16 w;
+ u16 h;
+
+ u8 alpha_source;
+ u8 alpha_value;
+
+ /* Applied settings */
+ struct ovly_regs regs;
+};
+
+static struct mcde_ovly_state *overlays;
+
+struct chnl_regs {
+ bool dirty;
+
+ bool floen;
+ u16 x;
+ u16 y;
+ u16 ppl;
+ u16 lpf;
+ u8 bpp;
+ bool internal_clk; /* CLKTYPE field */
+ u16 pcd;
+ u8 clksel;
+ u8 cdwin;
+ u16 (*map_r)(u8);
+ u16 (*map_g)(u8);
+ u16 (*map_b)(u8);
+ bool palette_enable;
+ bool bcd;
+ bool synchronized_update;
+ bool roten;
+ u8 rotdir;
+ u32 rotbuf1;
+ u32 rotbuf2;
+
+ /* Blending */
+ u8 blend_ctrl;
+ bool blend_en;
+ u8 alpha_blend;
+
+ /* DSI */
+ u8 dsipacking;
+};
+
+struct col_regs {
+ bool dirty;
+
+ u16 y_red;
+ u16 y_green;
+ u16 y_blue;
+ u16 cb_red;
+ u16 cb_green;
+ u16 cb_blue;
+ u16 cr_red;
+ u16 cr_green;
+ u16 cr_blue;
+ u16 off_y;
+ u16 off_cb;
+ u16 off_cr;
+};
+
+struct tv_regs {
+ bool dirty;
+
+ u16 dho; /* TV mode: left border width; destination horizontal offset */
+ /* LCD MODE: horizontal back porch */
+ u16 alw; /* TV mode: right border width */
+ /* LCD mode: horizontal front porch */
+ u16 hsw; /* horizontal synch width */
+	u16 dvo; /* TV mode: top border width; destination vertical offset */
+ /* LCD MODE: vertical back porch */
+ u16 bsl; /* TV mode: bottom border width; blanking start line */
+ /* LCD MODE: vertical front porch */
+ /* field 1 */
+ u16 bel1; /* TV mode: field total vertical blanking lines */
+ /* LCD mode: vertical sync width */
+ u16 fsl1; /* field vbp */
+ /* field 2 */
+ u16 bel2;
+ u16 fsl2;
+ u8 tv_mode;
+ bool sel_mode_tv;
+ bool inv_clk;
+ bool interlaced_en;
+ u32 lcdtim1;
+};
+
+struct mcde_chnl_state {
+ bool enabled;
+ bool reserved;
+ enum mcde_chnl id;
+ enum mcde_fifo fifo;
+ struct mcde_port port;
+ struct mcde_ovly_state *ovly0;
+ struct mcde_ovly_state *ovly1;
+ const struct chnl_config *cfg;
+ enum chnl_state state;
+ wait_queue_head_t state_waitq;
+ wait_queue_head_t vcmp_waitq;
+ atomic_t vcmp_cnt;
+ struct timer_list dsi_te_timer;
+ enum mcde_display_power_mode power_mode;
+
+ /* Staged settings */
+ u16 (*map_r)(u8);
+ u16 (*map_g)(u8);
+ u16 (*map_b)(u8);
+ bool palette_enable;
+ bool synchronized_update;
+ struct mcde_video_mode vmode;
+ enum mcde_display_rotation rotation;
+ u32 rotbuf1;
+ u32 rotbuf2;
+
+ struct mcde_col_transform rgb_2_ycbcr;
+ struct mcde_col_transform ycbcr_2_rgb;
+ struct mcde_col_transform *transform;
+
+ /* Blending */
+ u8 blend_ctrl;
+ bool blend_en;
+ u8 alpha_blend;
+
+ /* Applied settings */
+ struct chnl_regs regs;
+ struct col_regs col_regs;
+ struct tv_regs tv_regs;
+
+ /* an interlaced digital TV signal generates a VCMP per field */
+ bool vcmp_per_field;
+ bool even_vcmp;
+
+ bool formatter_updated;
+ bool esram_is_enabled;
+};
+
+static struct mcde_chnl_state *channels;
+
+struct chnl_config {
+ /* Key */
+ enum mcde_chnl_path path;
+
+ /* Value */
+ bool swap_a_c0;
+ bool swap_a_c0_set;
+ bool swap_b_c1;
+ bool swap_b_c1_set;
+ bool fabmux;
+ bool fabmux_set;
+ bool f01mux;
+ bool f01mux_set;
+};
+
+static /* TODO: const, compiler bug? */ struct chnl_config chnl_configs[] = {
+ /* Channel A */
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOA_DPI_0,
+ .swap_a_c0 = false, .swap_a_c0_set = true },
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOA_DSI_IFC0_0,
+ .swap_a_c0 = false, .swap_a_c0_set = true,
+ .fabmux = false, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOA_DSI_IFC0_1,
+ .swap_a_c0 = false, .swap_a_c0_set = true,
+ .fabmux = true, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOC0_DSI_IFC0_2,
+ .swap_a_c0 = true, .swap_a_c0_set = true,
+ .f01mux = false, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOC0_DSI_IFC1_0,
+ .swap_a_c0 = true, .swap_a_c0_set = true,
+ .f01mux = false, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOC0_DSI_IFC1_1,
+ .swap_a_c0 = true, .swap_a_c0_set = true,
+ .f01mux = true, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLA_FIFOA_DSI_IFC1_2,
+ .swap_a_c0 = false, .swap_a_c0_set = true,
+ .fabmux = false, .fabmux_set = true },
+ /* Channel B */
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOB_DPI_1,
+ .swap_b_c1 = false, .swap_b_c1_set = true },
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOB_DSI_IFC0_0,
+ .swap_b_c1 = false, .swap_b_c1_set = true,
+ .fabmux = true, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOB_DSI_IFC0_1,
+ .swap_b_c1 = false, .swap_b_c1_set = true,
+ .fabmux = false, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC0_2,
+ .swap_b_c1 = true, .swap_b_c1_set = true,
+ .f01mux = true, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC1_0,
+ .swap_b_c1 = true, .swap_b_c1_set = true,
+ .f01mux = true, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC1_1,
+ .swap_b_c1 = true, .swap_b_c1_set = true,
+ .f01mux = false, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLB_FIFOB_DSI_IFC1_2,
+ .swap_b_c1 = false, .swap_b_c1_set = true,
+ .fabmux = true, .fabmux_set = true },
+ /* Channel C0 */
+ { .path = MCDE_CHNLPATH_CHNLC0_FIFOA_DSI_IFC0_0,
+ .swap_a_c0 = true, .swap_a_c0_set = true,
+ .fabmux = false, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC0_FIFOA_DSI_IFC0_1,
+ .swap_a_c0 = true, .swap_a_c0_set = true,
+ .fabmux = true, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC0_FIFOC0_DSI_IFC0_2,
+ .swap_a_c0 = false, .swap_a_c0_set = true,
+ .f01mux = false, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC0_FIFOC0_DSI_IFC1_0,
+ .swap_a_c0 = false, .swap_a_c0_set = true,
+ .f01mux = false, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC0_FIFOC0_DSI_IFC1_1,
+ .swap_a_c0 = false, .swap_a_c0_set = true,
+ .f01mux = true, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC0_FIFOA_DSI_IFC1_2,
+ .swap_a_c0 = true, .swap_a_c0_set = true,
+ .fabmux = false, .fabmux_set = true },
+ /* Channel C1 */
+ { .path = MCDE_CHNLPATH_CHNLC1_FIFOB_DSI_IFC0_0,
+ .swap_b_c1 = true, .swap_b_c1_set = true,
+ .fabmux = true, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC1_FIFOB_DSI_IFC0_1,
+ .swap_b_c1 = true, .swap_b_c1_set = true,
+ .fabmux = false, .fabmux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC1_FIFOC1_DSI_IFC0_2,
+ .swap_b_c1 = false, .swap_b_c1_set = true,
+ .f01mux = true, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC1_FIFOC1_DSI_IFC1_0,
+ .swap_b_c1 = false, .swap_b_c1_set = true,
+ .f01mux = true, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC1_FIFOC1_DSI_IFC1_1,
+ .swap_b_c1 = false, .swap_b_c1_set = true,
+ .f01mux = false, .f01mux_set = true },
+ { .path = MCDE_CHNLPATH_CHNLC1_FIFOB_DSI_IFC1_2,
+ .swap_b_c1 = true, .swap_b_c1_set = true,
+ .fabmux = true, .fabmux_set = true },
+};
+
+/*
+ * Wait for CSM_RUNNING, all data sent for display
+ */
+static inline void wait_while_dsi_running(int lnk)
+{
+ u8 counter = DSI_READ_TIMEOUT;
+ while (dsi_rfld(lnk, DSI_CMD_MODE_STS, CSM_RUNNING) && --counter) {
+		dev_vdbg(&mcde_dev->dev,
+			"%s: DSI link %u read running state retry %u times\n",
+			__func__, lnk, (DSI_READ_TIMEOUT - counter));
+ udelay(DSI_READ_DELAY);
+ }
+ WARN_ON(!counter);
+ if (!counter)
+ dev_warn(&mcde_dev->dev,
+ "%s: DSI link %u read timeout!\n", __func__, lnk);
+}
+
+static void enable_clocks_and_power(struct platform_device *pdev)
+{
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /* VANA should be enabled before a DSS hard reset */
+ if (regulator_vana)
+ WARN_ON_ONCE(regulator_enable(regulator_vana));
+
+ WARN_ON_ONCE(regulator_enable(regulator_mcde_epod));
+
+ pdata->platform_set_clocks();
+
+ WARN_ON_ONCE(clk_enable(clock_mcde));
+}
+
+static void disable_clocks_and_power(struct platform_device *pdev)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ clk_disable(clock_mcde);
+
+ WARN_ON_ONCE(regulator_disable(regulator_mcde_epod));
+
+ if (regulator_vana)
+ WARN_ON_ONCE(regulator_disable(regulator_vana));
+}
+
+static void update_mcde_registers(void)
+{
+ struct mcde_platform_data *pdata = mcde_dev->dev.platform_data;
+
+ if (hardware_version == MCDE_CHIP_VERSION_1_0_4) {
+ /* Setup output muxing */
+ mcde_wreg(MCDE_CONF0,
+ MCDE_CONF0_IFIFOCTRLWTRMRKLVL(7));
+
+ mcde_wfld(MCDE_RISPP, VCMPARIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPBRIS, 1);
+
+ /* Enable channel VCMP interrupts */
+ mcde_wreg(MCDE_IMSCPP,
+ MCDE_IMSCPP_VCMPAIM(true) |
+ MCDE_IMSCPP_VCMPBIM(true));
+ } else {
+ /* Setup output muxing */
+ mcde_wreg(MCDE_CONF0,
+ MCDE_CONF0_IFIFOCTRLWTRMRKLVL(7) |
+ MCDE_CONF0_OUTMUX0(pdata->outmux[0]) |
+ MCDE_CONF0_OUTMUX1(pdata->outmux[1]) |
+ MCDE_CONF0_OUTMUX2(pdata->outmux[2]) |
+ MCDE_CONF0_OUTMUX3(pdata->outmux[3]) |
+ MCDE_CONF0_OUTMUX4(pdata->outmux[4]) |
+ pdata->syncmux);
+
+ mcde_wfld(MCDE_RISPP, VCMPARIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPBRIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPC0RIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPC1RIS, 1);
+
+ /* Enable channel VCMP interrupts */
+ mcde_wreg(MCDE_IMSCPP,
+ MCDE_IMSCPP_VCMPAIM(true) |
+ MCDE_IMSCPP_VCMPBIM(true) |
+ MCDE_IMSCPP_VCMPC0IM(true) |
+ MCDE_IMSCPP_VCMPC1IM(true));
+ }
+
+ mcde_wreg(MCDE_IMSCCHNL, MCDE_IMSCCHNL_CHNLAIM(0xf));
+ mcde_wreg(MCDE_IMSCERR, 0xFFFF01FF);
+
+ /* Setup sync pulse length
+ * Setting VSPMAX=0 disables the filter and VSYNC
+ * is generated after VSPMIN mcde cycles
+ */
+ mcde_wreg(MCDE_VSCRC0,
+ MCDE_VSCRC0_VSPMIN(0) |
+ MCDE_VSCRC0_VSPMAX(0));
+ mcde_wreg(MCDE_VSCRC1,
+ MCDE_VSCRC1_VSPMIN(1) |
+ MCDE_VSCRC1_VSPMAX(0xff));
+}
+
+static void disable_formatter(struct mcde_port *port)
+{
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ wait_while_dsi_running(port->link);
+ if (dsi_pll_is_enabled && (--dsi_pll_is_enabled == 0)) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ dev_dbg(&mcde_dev->dev, "%s disable dsipll\n",
+ __func__);
+ pdata->platform_disable_dsipll();
+ }
+ clk_disable(clock_dsi);
+ clk_disable(clock_dsi_lp);
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+ clk_disable(clock_dpi);
+ }
+}
+
+static void disable_mcde_hw(bool force_disable)
+{
+ int i;
+ bool mcde_up = false;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!mcde_is_enabled)
+ return;
+
+ for (i = 0; i < num_channels; i++) {
+ struct mcde_chnl_state *chnl = &channels[i];
+ if (force_disable || (chnl->enabled &&
+ chnl->state != CHNLSTATE_RUNNING)) {
+ stop_channel(chnl);
+ set_channel_state_sync(chnl, CHNLSTATE_SUSPEND);
+
+ if (chnl->formatter_updated) {
+ disable_formatter(&chnl->port);
+ chnl->formatter_updated = false;
+ }
+ if (chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_disable(
+ regulator_esram_epod));
+ chnl->esram_is_enabled = false;
+ }
+ } else if (chnl->enabled && chnl->state == CHNLSTATE_RUNNING) {
+ mcde_up = true;
+ }
+ }
+
+ if (mcde_up)
+ return;
+
+ free_irq(mcde_irq, &mcde_dev->dev);
+
+ disable_clocks_and_power(mcde_dev);
+
+ mcde_is_enabled = false;
+}
+
+static void dpi_video_mode_apply(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl->tv_regs.interlaced_en = chnl->vmode.interlaced;
+
+ chnl->tv_regs.sel_mode_tv = chnl->port.phy.dpi.tv_mode;
+ if (chnl->tv_regs.sel_mode_tv) {
+ /* TV mode */
+ u32 bel;
+		/* -4 since hsw excludes SAV/EAV, 2 bytes each */
+ chnl->tv_regs.hsw = chnl->vmode.hbp + chnl->vmode.hfp - 4;
+ /* vbp_field2 = vbp_field1 + 1 */
+ chnl->tv_regs.fsl1 = chnl->vmode.vbp / 2;
+ chnl->tv_regs.fsl2 = chnl->vmode.vbp - chnl->tv_regs.fsl1;
+ /* +1 since vbp_field2 = vbp_field1 + 1 */
+ bel = chnl->vmode.vbp + chnl->vmode.vfp;
+ /* in TV mode: bel2 = bel1 + 1 */
+ chnl->tv_regs.bel1 = bel / 2;
+ chnl->tv_regs.bel2 = bel - chnl->tv_regs.bel1;
+ if (chnl->port.phy.dpi.bus_width == 4)
+ chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P_BE;
+ else
+ chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P;
+ if (hardware_version == MCDE_CHIP_VERSION_3_0_8 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4)
+ chnl->tv_regs.inv_clk = true;
+ else {
+ chnl->tv_regs.dho = MCDE_CONFIG_TVOUT_HBORDER;
+ chnl->tv_regs.alw = MCDE_CONFIG_TVOUT_HBORDER;
+ chnl->tv_regs.dvo = MCDE_CONFIG_TVOUT_VBORDER;
+ chnl->tv_regs.bsl = MCDE_CONFIG_TVOUT_VBORDER;
+ }
+ } else {
+ /* LCD mode */
+ u32 polarity;
+ chnl->tv_regs.hsw = chnl->vmode.hsw;
+ chnl->tv_regs.dho = chnl->vmode.hbp;
+ chnl->tv_regs.alw = chnl->vmode.hfp;
+ chnl->tv_regs.bel1 = chnl->vmode.vsw;
+ chnl->tv_regs.bel2 = chnl->tv_regs.bel1;
+ chnl->tv_regs.dvo = chnl->vmode.vbp;
+ chnl->tv_regs.bsl = chnl->vmode.vfp;
+ chnl->tv_regs.fsl1 = 0;
+ chnl->tv_regs.fsl2 = 0;
+ polarity = chnl->port.phy.dpi.polarity;
+ chnl->tv_regs.lcdtim1 = MCDE_LCDTIM1A_IHS(
+ (polarity & DPI_ACT_LOW_HSYNC) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IVS(
+ (polarity & DPI_ACT_LOW_VSYNC) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IOE(
+ (polarity & DPI_ACT_LOW_DATA_ENABLE) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IPC(
+ (polarity & DPI_ACT_ON_FALLING_EDGE) != 0);
+ }
+ chnl->tv_regs.dirty = true;
+}
+
+static void update_dpi_registers(enum mcde_chnl chnl_id, struct tv_regs *regs)
+{
+ u8 idx = chnl_id;
+
+ dev_dbg(&mcde_dev->dev, "%s\n", __func__);
+ mcde_wreg(MCDE_TVCRA + idx * MCDE_TVCRA_GROUPOFFSET,
+ MCDE_TVCRA_SEL_MOD(regs->sel_mode_tv) |
+ MCDE_TVCRA_INTEREN(regs->interlaced_en) |
+ MCDE_TVCRA_IFIELD(0) |
+ MCDE_TVCRA_TVMODE(regs->tv_mode) |
+ MCDE_TVCRA_SDTVMODE(MCDE_TVCRA_SDTVMODE_Y0CBY1CR) |
+ MCDE_TVCRA_CKINV(regs->inv_clk) |
+ MCDE_TVCRA_AVRGEN(0));
+ mcde_wreg(MCDE_TVBLUA + idx * MCDE_TVBLUA_GROUPOFFSET,
+ MCDE_TVBLUA_TVBLU(MCDE_CONFIG_TVOUT_BACKGROUND_LUMINANCE) |
+ MCDE_TVBLUA_TVBCB(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CB)|
+ MCDE_TVBLUA_TVBCR(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CR));
+
+ /* Vertical timing registers */
+ mcde_wreg(MCDE_TVDVOA + idx * MCDE_TVDVOA_GROUPOFFSET,
+ MCDE_TVDVOA_DVO1(regs->dvo) |
+ MCDE_TVDVOA_DVO2(regs->dvo));
+ mcde_wreg(MCDE_TVBL1A + idx * MCDE_TVBL1A_GROUPOFFSET,
+ MCDE_TVBL1A_BEL1(regs->bel1) |
+ MCDE_TVBL1A_BSL1(regs->bsl));
+ mcde_wreg(MCDE_TVBL2A + idx * MCDE_TVBL1A_GROUPOFFSET,
+ MCDE_TVBL2A_BEL2(regs->bel2) |
+ MCDE_TVBL2A_BSL2(regs->bsl));
+ mcde_wreg(MCDE_TVISLA + idx * MCDE_TVISLA_GROUPOFFSET,
+ MCDE_TVISLA_FSL1(regs->fsl1) |
+ MCDE_TVISLA_FSL2(regs->fsl2));
+
+ /* Horizontal timing registers */
+ if (!regs->sel_mode_tv ||
+ hardware_version == MCDE_CHIP_VERSION_3_0_8 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4) {
+ mcde_wreg(MCDE_TVLBALWA + idx * MCDE_TVLBALWA_GROUPOFFSET,
+ MCDE_TVLBALWA_LBW(regs->hsw) |
+ MCDE_TVLBALWA_ALW(regs->alw));
+ mcde_wreg(MCDE_TVTIM1A + idx * MCDE_TVTIM1A_GROUPOFFSET,
+ MCDE_TVTIM1A_DHO(regs->dho));
+ } else {
+ /* in earlier versions the LBW and DHO fields are swapped
+ * TV mode only
+ */
+ mcde_wreg(MCDE_TVLBALWA + idx * MCDE_TVLBALWA_GROUPOFFSET,
+ MCDE_TVLBALWA_LBW(regs->dho) |
+ MCDE_TVLBALWA_ALW(regs->alw));
+ mcde_wreg(MCDE_TVTIM1A + idx * MCDE_TVTIM1A_GROUPOFFSET,
+ MCDE_TVTIM1A_DHO(regs->hsw));
+ }
+ if (!regs->sel_mode_tv)
+ mcde_wreg(MCDE_LCDTIM1A + idx * MCDE_LCDTIM1A_GROUPOFFSET,
+ regs->lcdtim1);
+ regs->dirty = false;
+}
+
+static void update_col_registers(enum mcde_chnl chnl_id, struct col_regs *regs)
+{
+ u8 idx = chnl_id;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ mcde_wreg(MCDE_RGBCONV1A + idx * MCDE_RGBCONV1A_GROUPOFFSET,
+ MCDE_RGBCONV1A_YR_RED(regs->y_red) |
+ MCDE_RGBCONV1A_YR_GREEN(regs->y_green));
+ mcde_wreg(MCDE_RGBCONV2A + idx * MCDE_RGBCONV2A_GROUPOFFSET,
+ MCDE_RGBCONV2A_YR_BLUE(regs->y_blue) |
+ MCDE_RGBCONV2A_CR_RED(regs->cr_red));
+ mcde_wreg(MCDE_RGBCONV3A + idx * MCDE_RGBCONV3A_GROUPOFFSET,
+ MCDE_RGBCONV3A_CR_GREEN(regs->cr_green) |
+ MCDE_RGBCONV3A_CR_BLUE(regs->cr_blue));
+ mcde_wreg(MCDE_RGBCONV4A + idx * MCDE_RGBCONV4A_GROUPOFFSET,
+ MCDE_RGBCONV4A_CB_RED(regs->cb_red) |
+ MCDE_RGBCONV4A_CB_GREEN(regs->cb_green));
+ mcde_wreg(MCDE_RGBCONV5A + idx * MCDE_RGBCONV5A_GROUPOFFSET,
+ MCDE_RGBCONV5A_CB_BLUE(regs->cb_blue) |
+ MCDE_RGBCONV5A_OFF_RED(regs->off_cr));
+ mcde_wreg(MCDE_RGBCONV6A + idx * MCDE_RGBCONV6A_GROUPOFFSET,
+ MCDE_RGBCONV6A_OFF_GREEN(regs->off_y) |
+ MCDE_RGBCONV6A_OFF_BLUE(regs->off_cb));
+ regs->dirty = false;
+}
+
+/* MCDE internal helpers */
+static u8 portfmt2dsipacking(enum mcde_port_pix_fmt pix_fmt)
+{
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ return MCDE_DSIVID0CONF0_PACKING_RGB565;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ return MCDE_DSIVID0CONF0_PACKING_RGB666;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ default:
+ return MCDE_DSIVID0CONF0_PACKING_RGB888;
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ return MCDE_DSIVID0CONF0_PACKING_HDTV;
+ }
+}
+
+static u8 portfmt2bpp(enum mcde_port_pix_fmt pix_fmt)
+{
+	/* TODO: Check DPI spec. REVIEW: Remove or check */
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DPI_16BPP_C1:
+ case MCDE_PORTPIXFMT_DPI_16BPP_C2:
+ case MCDE_PORTPIXFMT_DPI_16BPP_C3:
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ return 16;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C1:
+ case MCDE_PORTPIXFMT_DPI_18BPP_C2:
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ return 18;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DPI_24BPP:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ return 24;
+ default:
+ return 1;
+ }
+}
+
+static u8 bpp2outbpp(u8 bpp)
+{
+ switch (bpp) {
+ case 16:
+ return MCDE_CRA1_OUTBPP_16BPP;
+ case 18:
+ return MCDE_CRA1_OUTBPP_18BPP;
+ case 24:
+ return MCDE_CRA1_OUTBPP_24BPP;
+ default:
+ return 0;
+ }
+}
+
+static u8 portfmt2cdwin(enum mcde_port_pix_fmt pix_fmt)
+{
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DPI_16BPP_C1:
+ return MCDE_CRA1_CDWIN_16BPP_C1;
+ case MCDE_PORTPIXFMT_DPI_16BPP_C2:
+ return MCDE_CRA1_CDWIN_16BPP_C2;
+ case MCDE_PORTPIXFMT_DPI_16BPP_C3:
+ return MCDE_CRA1_CDWIN_16BPP_C3;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C1:
+ return MCDE_CRA1_CDWIN_18BPP_C1;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C2:
+ return MCDE_CRA1_CDWIN_18BPP_C2;
+ case MCDE_PORTPIXFMT_DPI_24BPP:
+ return MCDE_CRA1_CDWIN_24BPP;
+ default:
+ /* only DPI formats are relevant */
+ return 0;
+ }
+}
+
+static u32 get_input_fifo_size(void)
+{
+ if (hardware_version == MCDE_CHIP_VERSION_1_0_4 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4)
+ return MCDE_INPUT_FIFO_SIZE_4_0_4;
+ else
+ return MCDE_INPUT_FIFO_SIZE_3_0_8;
+}
+
+static u32 get_output_fifo_size(enum mcde_fifo fifo)
+{
+ u32 ret = 1; /* Avoid div by zero */
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ case MCDE_FIFO_B:
+ ret = MCDE_FIFO_AB_SIZE;
+ break;
+ case MCDE_FIFO_C0:
+ case MCDE_FIFO_C1:
+ ret = MCDE_FIFO_C0C1_SIZE;
+ break;
+ default:
+		dev_warn(&mcde_dev->dev, "Unsupported fifo\n");
+ break;
+ }
+ return ret;
+}
+
+static u8 get_dsi_formid(const struct mcde_port *port)
+{
+ if (hardware_version == MCDE_CHIP_VERSION_4_0_4) {
+ if (port->link == 0)
+ return MCDE_CTRLA_FORMID_DSI0VID;
+ if (port->link == 1)
+ return MCDE_CTRLA_FORMID_DSI0CMD;
+ } else {
+ if (port->ifc == DSI_VIDEO_MODE && port->link == 0)
+ return MCDE_CTRLA_FORMID_DSI0VID;
+ else if (port->ifc == DSI_VIDEO_MODE && port->link == 1)
+ return MCDE_CTRLA_FORMID_DSI1VID;
+ else if (port->ifc == DSI_VIDEO_MODE && port->link == 2)
+ return MCDE_CTRLA_FORMID_DSI2VID;
+ else if (port->ifc == DSI_CMD_MODE && port->link == 0)
+ return MCDE_CTRLA_FORMID_DSI0CMD;
+ else if (port->ifc == DSI_CMD_MODE && port->link == 1)
+ return MCDE_CTRLA_FORMID_DSI1CMD;
+ else if (port->ifc == DSI_CMD_MODE && port->link == 2)
+ return MCDE_CTRLA_FORMID_DSI2CMD;
+ }
+ return 0;
+}
+
+static struct mcde_chnl_state *find_channel_by_dsilink(int link)
+{
+ struct mcde_chnl_state *chnl = &channels[0];
+ for (; chnl < &channels[num_channels]; chnl++)
+ if (chnl->enabled && chnl->port.link == link &&
+ chnl->port.type == MCDE_PORTTYPE_DSI)
+ return chnl;
+ return NULL;
+}
+
+static inline void mcde_handle_vcmp(struct mcde_chnl_state *chnl)
+{
+ if (!chnl->vcmp_per_field ||
+ (chnl->vcmp_per_field && chnl->even_vcmp)) {
+ atomic_inc(&chnl->vcmp_cnt);
+ if (chnl->state == CHNLSTATE_STOPPING)
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPED);
+ wake_up_all(&chnl->vcmp_waitq);
+ }
+ chnl->even_vcmp = !chnl->even_vcmp;
+}
+
+static void handle_dsi_irq(struct mcde_chnl_state *chnl, int i)
+{
+ u32 irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG);
+ if (irq_status) {
+ dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true));
+ dev_vdbg(&mcde_dev->dev, "BTA TE DSI%d\n", i);
+ do_softwaretrig(chnl);
+ }
+
+ irq_status = dsi_rfld(i, DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG);
+ if (irq_status) {
+ dsi_wreg(i, DSI_CMD_MODE_STS_CLR,
+ DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true));
+ dev_warn(&mcde_dev->dev, "NO_TE DSI%d\n", i);
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPED);
+ }
+
+ irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED);
+ if (irq_status) {
+ /* DSI TE polling answer received */
+ dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(true));
+
+ /* Reset TE watchdog timer */
+ if (chnl->port.sync_src == MCDE_SYNCSRC_TE_POLLING)
+ dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT);
+ }
+}
+
+static irqreturn_t mcde_irq_handler(int irq, void *dev)
+{
+ int i;
+ u32 irq_status;
+
+ irq_status = mcde_rreg(MCDE_MISCHNL);
+ if (irq_status) {
+ dev_err(&mcde_dev->dev, "chnl error=%.8x\n", irq_status);
+ mcde_wreg(MCDE_RISCHNL, irq_status);
+ }
+ irq_status = mcde_rreg(MCDE_MISERR);
+ if (irq_status) {
+ dev_err(&mcde_dev->dev, "error=%.8x\n", irq_status);
+ mcde_wreg(MCDE_RISERR, irq_status);
+ }
+
+ /* Handle channel irqs */
+ irq_status = mcde_rreg(MCDE_RISPP);
+ if (irq_status & MCDE_RISPP_VCMPARIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_A]);
+ if (irq_status & MCDE_RISPP_VCMPBRIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_B]);
+ if (irq_status & MCDE_RISPP_VCMPC0RIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_C0]);
+ if (irq_status & MCDE_RISPP_VCMPC1RIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_C1]);
+ mcde_wreg(MCDE_RISPP, irq_status);
+
+ for (i = 0; i < num_dsilinks; i++) {
+ struct mcde_chnl_state *chnl_from_dsi;
+
+ chnl_from_dsi = find_channel_by_dsilink(i);
+
+ if (chnl_from_dsi == NULL)
+ continue;
+
+ handle_dsi_irq(chnl_from_dsi, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allowed atomic transitions:
+ * SETUP -> (WAIT_TE ->) RUNNING -> STOPPING -> STOPPED, and -> IDLE
+ */
+static int set_channel_state_atomic(struct mcde_chnl_state *chnl,
+ enum chnl_state state)
+{
+ enum chnl_state chnl_state = chnl->state;
+
+ dev_dbg(&mcde_dev->dev, "Channel state change"
+ " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl_state, state);
+
+ if ((chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_WAIT_TE) ||
+ (chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_RUNNING) ||
+ (chnl_state == CHNLSTATE_WAIT_TE && state == CHNLSTATE_RUNNING) ||
+ (chnl_state == CHNLSTATE_RUNNING && state == CHNLSTATE_STOPPING)) {
+ /* Set wait TE, running, or stopping state */
+ chnl->state = state;
+ return 0;
+ } else if ((chnl_state == CHNLSTATE_STOPPING &&
+ state == CHNLSTATE_STOPPED) ||
+ (chnl_state == CHNLSTATE_WAIT_TE &&
+ state == CHNLSTATE_STOPPED)) {
+ /* Set stopped state */
+ chnl->state = state;
+ wake_up_all(&chnl->state_waitq);
+ return 0;
+ } else if (state == CHNLSTATE_IDLE) {
+ /* Set idle state */
+ WARN_ON_ONCE(chnl_state != CHNLSTATE_DSI_READ &&
+ chnl_state != CHNLSTATE_DSI_WRITE &&
+ chnl_state != CHNLSTATE_SUSPEND);
+ chnl->state = state;
+ wake_up_all(&chnl->state_waitq);
+ return 0;
+ } else {
+ /* Invalid atomic state transition */
+ dev_warn(&mcde_dev->dev, "Channel state change error (chnl=%d,"
+ " old=%d, new=%d)\n", chnl->id, chnl_state, state);
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+}
+
+/* LOCKING: mcde_hw_lock */
+static int set_channel_state_sync(struct mcde_chnl_state *chnl,
+ enum chnl_state state)
+{
+ int ret = 0;
+ enum chnl_state chnl_state = chnl->state;
+
+ dev_dbg(&mcde_dev->dev, "Channel state change"
+ " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl->state, state);
+
+ /* No change */
+ if (chnl_state == state)
+ return 0;
+
+ /* Wait for IDLE before changing state */
+ if (chnl_state != CHNLSTATE_IDLE) {
+ ret = wait_event_timeout(chnl->state_waitq,
+ /* STOPPED -> IDLE is manual, so wait for both */
+ chnl->state == CHNLSTATE_STOPPED ||
+ chnl->state == CHNLSTATE_IDLE,
+ msecs_to_jiffies(CHNL_TIMEOUT));
+ if (WARN_ON_ONCE(!ret))
+ dev_warn(&mcde_dev->dev, "Wait for channel timeout "
+ "(chnl=%d, curr=%d, new=%d)\n",
+ chnl->id, chnl->state, state);
+ chnl_state = chnl->state;
+ }
+
+ /* Do manual transition from STOPPED to IDLE */
+ if (chnl_state == CHNLSTATE_STOPPED)
+ wait_for_flow_disabled(chnl);
+
+ /* State is IDLE, do transition to new state */
+ chnl->state = state;
+
+ return ret;
+}
+
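+/*
+ * Wait for the next VCMP on this channel, i.e. for vcmp_cnt to advance past
+ * its current value, with a CHNL_TIMEOUT ms timeout. Returns the
+ * wait_event_timeout() result: 0 if the condition was not met in time.
+ */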
+static int wait_for_vcmp(struct mcde_chnl_state *chnl)
+{
+ u64 vcmp = atomic_read(&chnl->vcmp_cnt) + 1;
+ int ret = wait_event_timeout(chnl->vcmp_waitq,
+ atomic_read(&chnl->vcmp_cnt) >= vcmp,
+ msecs_to_jiffies(CHNL_TIMEOUT));
+ return ret;
+}
+
+static int update_channel_static_registers(struct mcde_chnl_state *chnl)
+{
+ const struct chnl_config *cfg = chnl->cfg;
+ const struct mcde_port *port = &chnl->port;
+
+ if (hardware_version == MCDE_CHIP_VERSION_3_0_5) {
+ /* Fifo & muxing */
+ if (cfg->swap_a_c0_set)
+ mcde_wfld(MCDE_CONF0, SWAP_A_C0_V1, cfg->swap_a_c0);
+ if (cfg->swap_b_c1_set)
+ mcde_wfld(MCDE_CONF0, SWAP_B_C1_V1, cfg->swap_b_c1);
+ if (cfg->fabmux_set)
+ mcde_wfld(MCDE_CR, FABMUX_V1, cfg->fabmux);
+ if (cfg->f01mux_set)
+ mcde_wfld(MCDE_CR, F01MUX_V1, cfg->f01mux);
+
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ if (port->link == 0)
+ mcde_wfld(MCDE_CR, DPIA_EN_V1, true);
+ else if (port->link == 1)
+ mcde_wfld(MCDE_CR, DPIB_EN_V1, true);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ if (port->ifc == DSI_VIDEO_MODE && port->link == 0)
+ mcde_wfld(MCDE_CR, DSIVID0_EN_V1, true);
+ else if (port->ifc == DSI_VIDEO_MODE && port->link == 1)
+ mcde_wfld(MCDE_CR, DSIVID1_EN_V1, true);
+ else if (port->ifc == DSI_VIDEO_MODE && port->link == 2)
+ mcde_wfld(MCDE_CR, DSIVID2_EN_V1, true);
+ else if (port->ifc == DSI_CMD_MODE && port->link == 0)
+ mcde_wfld(MCDE_CR, DSICMD0_EN_V1, true);
+ else if (port->ifc == DSI_CMD_MODE && port->link == 1)
+ mcde_wfld(MCDE_CR, DSICMD1_EN_V1, true);
+ else if (port->ifc == DSI_CMD_MODE && port->link == 2)
+ mcde_wfld(MCDE_CR, DSICMD2_EN_V1, true);
+ }
+ } else if (hardware_version == MCDE_CHIP_VERSION_3_0_8 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4) {
+ switch (chnl->fifo) {
+ case MCDE_FIFO_A:
+ mcde_wreg(MCDE_CHNL0MUXING_V2 + chnl->id *
+ MCDE_CHNL0MUXING_V2_GROUPOFFSET,
+ MCDE_CHNL0MUXING_V2_FIFO_ID_ENUM(FIFO_A));
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CTRLA, FORMTYPE,
+ MCDE_CTRLA_FORMTYPE_DPITV);
+ mcde_wfld(MCDE_CTRLA, FORMID, port->link);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ mcde_wfld(MCDE_CTRLA, FORMTYPE,
+ MCDE_CTRLA_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLA, FORMID,
+ get_dsi_formid(port));
+ }
+ break;
+ case MCDE_FIFO_B:
+ mcde_wreg(MCDE_CHNL0MUXING_V2 + chnl->id *
+ MCDE_CHNL0MUXING_V2_GROUPOFFSET,
+ MCDE_CHNL0MUXING_V2_FIFO_ID_ENUM(FIFO_B));
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CTRLB, FORMTYPE,
+ MCDE_CTRLB_FORMTYPE_DPITV);
+ mcde_wfld(MCDE_CTRLB, FORMID, port->link);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ mcde_wfld(MCDE_CTRLB, FORMTYPE,
+ MCDE_CTRLB_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLB, FORMID,
+ get_dsi_formid(port));
+ }
+
+ break;
+ case MCDE_FIFO_C0:
+ mcde_wreg(MCDE_CHNL0MUXING_V2 + chnl->id *
+ MCDE_CHNL0MUXING_V2_GROUPOFFSET,
+ MCDE_CHNL0MUXING_V2_FIFO_ID_ENUM(FIFO_C0));
+ if (port->type == MCDE_PORTTYPE_DPI)
+ return -EINVAL;
+ mcde_wfld(MCDE_CTRLC0, FORMTYPE,
+ MCDE_CTRLC0_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLC0, FORMID, get_dsi_formid(port));
+ break;
+ case MCDE_FIFO_C1:
+ mcde_wreg(MCDE_CHNL0MUXING_V2 + chnl->id *
+ MCDE_CHNL0MUXING_V2_GROUPOFFSET,
+ MCDE_CHNL0MUXING_V2_FIFO_ID_ENUM(FIFO_C1));
+ if (port->type == MCDE_PORTTYPE_DPI)
+ return -EINVAL;
+ mcde_wfld(MCDE_CTRLC1, FORMTYPE,
+ MCDE_CTRLC1_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLC1, FORMID, get_dsi_formid(port));
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (hardware_version == MCDE_CHIP_VERSION_1_0_4) {
+ switch (chnl->fifo) {
+ case MCDE_FIFO_A:
+ /* only channel A is supported */
+ if (chnl->id != 0)
+ return -EINVAL;
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ if ((port->link == 1 &&
+ port->ifc == DSI_VIDEO_MODE) ||
+ (port->link == 0 && port->ifc == DSI_CMD_MODE))
+ return -EINVAL;
+ mcde_wfld(MCDE_CR, DSI0_EN_V3, true);
+
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CR, DPI_EN_V3, true);
+ }
+ break;
+ case MCDE_FIFO_B:
+ if (port->type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+ /* only channel B is supported */
+ if (chnl->id != 1)
+ return -EINVAL;
+
+ if ((port->link == 0 && port->ifc == DSI_VIDEO_MODE) ||
+ (port->link == 1 && port->ifc == DSI_CMD_MODE))
+ return -EINVAL;
+
+ mcde_wfld(MCDE_CR, DSI1_EN_V3, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Formatter */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ int i = 0;
+ u8 idx;
+ u8 lnk = port->link;
+ int ret;
+
+ WARN_ON_ONCE(clk_enable(clock_dsi));
+ WARN_ON_ONCE(clk_enable(clock_dsi_lp));
+
+ if (!dsi_pll_is_enabled) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ ret = pdata->platform_enable_dsipll();
+ if (ret < 0) {
+ dev_warn(&mcde_dev->dev, "%s: "
+ "enable_dsipll failed ret = %d\n",
+ __func__, ret);
+ goto enable_dsipll_err;
+ }
+ dev_dbg(&mcde_dev->dev, "%s enable dsipll\n",
+ __func__);
+ }
+ dsi_pll_is_enabled++;
+
+ if (hardware_version == MCDE_CHIP_VERSION_1_0_4 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4)
+ idx = port->link;
+ else
+ idx = 2 * port->link + port->ifc;
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, LINK_EN, true);
+ dev_dbg(&mcde_dev->dev, "DSI%d LINK_EN\n", lnk);
+
+ if (port->sync_src == MCDE_SYNCSRC_TE_POLLING) {
+ if (hardware_version == MCDE_CHIP_VERSION_3_0_5) {
+ dev_err(&mcde_dev->dev,
+ "DSI TE polling is not supported on this HW\n");
+ goto dsi_link_error;
+ }
+
+ /* Enable DSI TE polling */
+ dsi_te_poll_req(chnl);
+
+ /* Set timer to detect non TE answer */
+ dsi_te_poll_set_timer(chnl,
+ DSI_TE_NO_ANSWER_TIMEOUT_INIT);
+ } else {
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true);
+ }
+
+ if (hardware_version == MCDE_CHIP_VERSION_3_0_5) {
+ if (port->phy.dsi.data_lanes_swap) {
+ dev_warn(&mcde_dev->dev,
+ "DSI %d data lane remap not available!\n",
+ lnk);
+ goto dsi_link_error;
+ }
+ } else
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN,
+ port->phy.dsi.data_lanes_swap);
+
+ dsi_wreg(lnk, DSI_MCTL_DPHY_STATIC,
+ DSI_MCTL_DPHY_STATIC_UI_X4(port->phy.dsi.ui));
+ dsi_wreg(lnk, DSI_DPHY_LANES_TRIM,
+ DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(0_90));
+ dsi_wreg(lnk, DSI_MCTL_DPHY_TIMEOUT,
+ DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(0xf) |
+ DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(0x3fff) |
+ DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(0x3fff));
+ dsi_wreg(lnk, DSI_MCTL_MAIN_PHY_CTL,
+ DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(0xf) |
+ DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(
+ port->phy.dsi.clk_cont));
+ dsi_wreg(lnk, DSI_MCTL_ULPOUT_TIME,
+ DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(1) |
+ DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(1));
+ /* TODO: make enum */
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_MODE, false);
+ /* TODO: make enum */
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_PRI, port->ifc == 1);
+ dsi_wreg(lnk, DSI_MCTL_MAIN_EN,
+ DSI_MCTL_MAIN_EN_PLL_START(true) |
+ DSI_MCTL_MAIN_EN_CKLANE_EN(true) |
+ DSI_MCTL_MAIN_EN_DAT1_EN(true) |
+ DSI_MCTL_MAIN_EN_DAT2_EN(port->phy.dsi.num_data_lanes
+ == 2) |
+ DSI_MCTL_MAIN_EN_IF1_EN(port->ifc == 0) |
+ DSI_MCTL_MAIN_EN_IF2_EN(port->ifc == 1));
+ while (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, CLKLANE_READY) == 0 ||
+ dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT1_READY) == 0 ||
+ dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT2_READY) == 0) {
+ mdelay(1);
+ if (i++ == 10) {
+ dev_warn(&mcde_dev->dev,
+ "DSI lane not ready (link=%d)!\n", lnk);
+ goto dsi_link_error;
+ }
+ }
+
+ mcde_wreg(MCDE_DSIVID0CONF0 +
+ idx * MCDE_DSIVID0CONF0_GROUPOFFSET,
+ MCDE_DSIVID0CONF0_BLANKING(0) |
+ MCDE_DSIVID0CONF0_VID_MODE(
+ port->mode == MCDE_PORTMODE_VID) |
+ MCDE_DSIVID0CONF0_CMD8(true) |
+ MCDE_DSIVID0CONF0_BIT_SWAP(false) |
+ MCDE_DSIVID0CONF0_BYTE_SWAP(false) |
+ MCDE_DSIVID0CONF0_DCSVID_NOTGEN(true));
+
+ if (port->mode == MCDE_PORTMODE_CMD) {
+ if (port->ifc == DSI_VIDEO_MODE)
+ dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF1_ID,
+ port->phy.dsi.virt_id);
+ else if (port->ifc == DSI_CMD_MODE)
+ dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF2_ID,
+ port->phy.dsi.virt_id);
+ }
+ }
+
+ if (port->type == MCDE_PORTTYPE_DPI)
+ WARN_ON_ONCE(clk_enable(clock_dpi));
+
+ mcde_wfld(MCDE_CR, MCDEEN, true);
+ chnl->formatter_updated = true;
+
+ dev_vdbg(&mcde_dev->dev, "Static registers setup, chnl=%d\n", chnl->id);
+
+ return 0;
+dsi_link_error:
+ if (dsi_pll_is_enabled && (--dsi_pll_is_enabled == 0)) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ dev_dbg(&mcde_dev->dev, "%s le:disable dsipll\n", __func__);
+ pdata->platform_disable_dsipll();
+ }
+enable_dsipll_err:
+ clk_disable(clock_dsi_lp);
+ clk_disable(clock_dsi);
+ return -EINVAL;
+}
+
+void mcde_chnl_col_convert_apply(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->transform != transform) {
+
+ chnl->col_regs.y_red = transform->matrix[0][0];
+ chnl->col_regs.y_green = transform->matrix[0][1];
+ chnl->col_regs.y_blue = transform->matrix[0][2];
+ chnl->col_regs.cb_red = transform->matrix[1][0];
+ chnl->col_regs.cb_green = transform->matrix[1][1];
+ chnl->col_regs.cb_blue = transform->matrix[1][2];
+ chnl->col_regs.cr_red = transform->matrix[2][0];
+ chnl->col_regs.cr_green = transform->matrix[2][1];
+ chnl->col_regs.cr_blue = transform->matrix[2][2];
+ chnl->col_regs.off_y = transform->offset[0];
+ chnl->col_regs.off_cb = transform->offset[1];
+ chnl->col_regs.off_cr = transform->offset[2];
+ chnl->col_regs.dirty = true;
+
+ chnl->transform = transform;
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+static void chnl_ovly_pixel_format_apply(struct mcde_chnl_state *chnl,
+ struct mcde_ovly_state *ovly)
+{
+ struct mcde_port *port = &chnl->port;
+ struct ovly_regs *regs = &ovly->regs;
+
+ /* Note: YUV -> YUV: blending YUV overlays will not make sense. */
+ static struct mcde_col_transform crycb_2_ycbcr = {
+ /* Note that in MCDE YUV 422 pixels come as VYU pixels */
+ .matrix = {
+ {0x0000, 0x0100, 0x0000},
+ {0x0000, 0x0000, 0x0100},
+ {0x0100, 0x0000, 0x0000},
+ },
+ .offset = {0, 0, 0},
+ };
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ if (port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422) {
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) {
+ /* standard case: DSI: RGB -> RGB */
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ } else {
+ /* DSI: YUV -> RGB */
+ /* TODO change matrix */
+ regs->col_conv =
+ MCDE_OVL0CR_COLCCTRL_ENABLED_SAT;
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->ycbcr_2_rgb);
+ }
+ } else {
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422)
+ /* DSI: RGB -> YUV */
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->rgb_2_ycbcr);
+ else
+ /* DSI: YUV -> YUV */
+ mcde_chnl_col_convert_apply(chnl,
+ &crycb_2_ycbcr);
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI && port->phy.dpi.tv_mode) {
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT;
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422)
+ mcde_chnl_col_convert_apply(chnl, &chnl->rgb_2_ycbcr);
+ else
+ mcde_chnl_col_convert_apply(chnl, &crycb_2_ycbcr);
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+		/* Note: YUV is not a supported port pixel format for DPI */
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) {
+ /* standard case: DPI: RGB -> RGB */
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ } else {
+ /* DPI: YUV -> RGB */
+ regs->col_conv =
+ MCDE_OVL0CR_COLCCTRL_ENABLED_SAT;
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->ycbcr_2_rgb);
+ }
+ }
+}
+
+/* REVIEW: Make update_* an mcde_rectangle? */
+static void update_overlay_registers(u8 idx, struct ovly_regs *regs,
+ struct mcde_port *port, enum mcde_fifo fifo,
+ u16 update_x, u16 update_y, u16 update_w,
+ u16 update_h, s16 stride, bool interlaced,
+ enum mcde_display_rotation rotation)
+{
+ /* TODO: fix clipping for small overlay */
+ u32 lmrgn = (regs->cropx + update_x) * regs->bits_per_pixel;
+ u32 tmrgn = (regs->cropy + update_y) * stride;
+ u32 ppl = regs->ppl - update_x;
+ u32 lpf = regs->lpf - update_y;
+ s32 ljinc = stride;
+ u32 pixelfetchwtrmrklevel;
+ u8 nr_of_bufs = 1;
+ u32 fifo_size;
+ u32 sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+
+ if (rotation == MCDE_DISPLAY_ROT_180_CCW) {
+ ljinc = -ljinc;
+ tmrgn += stride * (regs->lpf - 1) / 8;
+ }
+
+	/*
+	 * Preferably most of this would be done in an apply function instead
+	 * of on every update; lpf, however, depends on update_y.
+	 */
+ if (interlaced && port->type == MCDE_PORTTYPE_DSI) {
+ nr_of_bufs = 2;
+ lpf = lpf / 2;
+ ljinc *= 2;
+ }
+
+ fifo_size = get_output_fifo_size(fifo);
+
+ if ((fifo == MCDE_FIFO_A || fifo == MCDE_FIFO_B) &&
+ regs->ppl >= fifo_size * 2)
+ pixelfetchwtrmrklevel = get_input_fifo_size() * 2;
+ else
+ pixelfetchwtrmrklevel = get_input_fifo_size() / 2;
+
+ if (port->update_auto_trig && port->type == MCDE_PORTTYPE_DSI) {
+ switch (port->sync_src) {
+ case MCDE_SYNCSRC_OFF:
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+ break;
+ case MCDE_SYNCSRC_TE0:
+ case MCDE_SYNCSRC_TE1:
+ case MCDE_SYNCSRC_TE_POLLING:
+ default:
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE;
+ break;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI)
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+
+ mcde_wreg(MCDE_EXTSRC0CONF + idx * MCDE_EXTSRC0CONF_GROUPOFFSET,
+ MCDE_EXTSRC0CONF_BUF_ID(0) |
+ MCDE_EXTSRC0CONF_BUF_NB(nr_of_bufs) |
+ MCDE_EXTSRC0CONF_PRI_OVLID(idx) |
+ MCDE_EXTSRC0CONF_BPP(regs->bpp) |
+ MCDE_EXTSRC0CONF_BGR(regs->bgr) |
+ MCDE_EXTSRC0CONF_BEBO(regs->bebo) |
+ MCDE_EXTSRC0CONF_BEPO(false));
+ mcde_wreg(MCDE_EXTSRC0CR + idx * MCDE_EXTSRC0CR_GROUPOFFSET,
+ MCDE_EXTSRC0CR_SEL_MOD(sel_mod) |
+ MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(PRIMARY) |
+ MCDE_EXTSRC0CR_FS_DIV_DISABLE(false) |
+ MCDE_EXTSRC0CR_FORCE_FS_DIV(false));
+ mcde_wreg(MCDE_OVL0CR + idx * MCDE_OVL0CR_GROUPOFFSET,
+ MCDE_OVL0CR_OVLEN(regs->enabled) |
+ MCDE_OVL0CR_COLCCTRL(regs->col_conv) |
+ MCDE_OVL0CR_CKEYGEN(false) |
+ MCDE_OVL0CR_ALPHAPMEN(false) |
+ MCDE_OVL0CR_OVLF(false) |
+ MCDE_OVL0CR_OVLR(false) |
+ MCDE_OVL0CR_OVLB(false) |
+ MCDE_OVL0CR_FETCH_ROPC(0) |
+ MCDE_OVL0CR_STBPRIO(0) |
+ MCDE_OVL0CR_BURSTSIZE_ENUM(HW_8W) |
+ /* TODO: enum, get from ovly */
+ MCDE_OVL0CR_MAXOUTSTANDING_ENUM(8_REQ) |
+ /* TODO: _HW_8W, calculate? */
+ MCDE_OVL0CR_ROTBURSTSIZE_ENUM(HW_8W));
+ mcde_wreg(MCDE_OVL0CONF + idx * MCDE_OVL0CONF_GROUPOFFSET,
+ MCDE_OVL0CONF_PPL(ppl) |
+ MCDE_OVL0CONF_EXTSRC_ID(idx) |
+ MCDE_OVL0CONF_LPF(lpf));
+ mcde_wreg(MCDE_OVL0CONF2 + idx * MCDE_OVL0CONF2_GROUPOFFSET,
+ MCDE_OVL0CONF2_BP(regs->alpha_source) |
+ MCDE_OVL0CONF2_ALPHAVALUE(regs->alpha_value) |
+ MCDE_OVL0CONF2_OPQ(regs->opq) |
+ MCDE_OVL0CONF2_PIXOFF(lmrgn & 63) |
+ MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(
+ pixelfetchwtrmrklevel));
+ mcde_wreg(MCDE_OVL0LJINC + idx * MCDE_OVL0LJINC_GROUPOFFSET,
+ ljinc);
+ mcde_wreg(MCDE_OVL0CROP + idx * MCDE_OVL0CROP_GROUPOFFSET,
+ MCDE_OVL0CROP_TMRGN(tmrgn) |
+ MCDE_OVL0CROP_LMRGN(lmrgn >> 6));
+ regs->dirty = false;
+
+ dev_vdbg(&mcde_dev->dev, "Overlay registers setup, idx=%d\n", idx);
+}
+
+static void update_overlay_registers_on_the_fly(u8 idx, struct ovly_regs *regs)
+{
+ mcde_wreg(MCDE_OVL0COMP + idx * MCDE_OVL0COMP_GROUPOFFSET,
+ MCDE_OVL0COMP_XPOS(regs->xpos) |
+ MCDE_OVL0COMP_CH_ID(regs->ch_id) |
+ MCDE_OVL0COMP_YPOS(regs->ypos) |
+ MCDE_OVL0COMP_Z(regs->z));
+
+ mcde_wreg(MCDE_EXTSRC0A0 + idx * MCDE_EXTSRC0A0_GROUPOFFSET,
+ regs->baseaddress0);
+ mcde_wreg(MCDE_EXTSRC0A1 + idx * MCDE_EXTSRC0A1_GROUPOFFSET,
+ regs->baseaddress1);
+ regs->dirty_buf = false;
+}
+
+static void do_softwaretrig(struct mcde_chnl_state *chnl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
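+	/*
+	 * enable_flow()/disable_flow() bracket the SW_TRIG write so that the
+	 * flow is stopped again right after this single software-triggered
+	 * update.
+	 */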
+ enable_flow(chnl);
+ mcde_wreg(MCDE_CHNL0SYNCHSW +
+ chnl->id * MCDE_CHNL0SYNCHSW_GROUPOFFSET,
+ MCDE_CHNL0SYNCHSW_SW_TRIG(true));
+ disable_flow(chnl);
+
+ local_irq_restore(flags);
+
+ dev_vdbg(&mcde_dev->dev, "Software TRIG on channel %d\n", chnl->id);
+}
+
+static void disable_flow(struct mcde_chnl_state *chnl)
+{
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(chnl->state != CHNLSTATE_RUNNING))
+ return;
+
+ local_irq_save(flags);
+
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ mcde_wfld(MCDE_CRA0, FLOEN, false);
+ break;
+ case MCDE_CHNL_B:
+ mcde_wfld(MCDE_CRB0, FLOEN, false);
+ break;
+ case MCDE_CHNL_C0:
+ mcde_wfld(MCDE_CRC, C1EN, false);
+ break;
+ case MCDE_CHNL_C1:
+ mcde_wfld(MCDE_CRC, C2EN, false);
+ break;
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPING);
+
+ local_irq_restore(flags);
+}
+
+static void stop_channel(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+ bool dpi_lcd_mode;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->state != CHNLSTATE_RUNNING)
+ return;
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS,
+ false);
+ if (port->sync_src == MCDE_SYNCSRC_TE_POLLING)
+ del_timer(&chnl->dsi_te_timer);
+ }
+
+ disable_flow(chnl);
+	/*
+	 * VCMP must be triggered manually after the channel is disabled,
+	 * for all channels using video mode except DPI LCD.
+	 */
+ dpi_lcd_mode = (port->type == MCDE_PORTTYPE_DPI &&
+ !chnl->port.phy.dpi.tv_mode);
+
+ if (chnl->port.update_auto_trig && !dpi_lcd_mode)
+ mcde_wreg(MCDE_SISPP, 1 << chnl->id);
+}
+
+static void wait_for_flow_disabled(struct mcde_chnl_state *chnl)
+{
+ int i = 0;
+
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRA0, FLOEN)) {
+				dev_vdbg(&mcde_dev->dev,
+					"Flow (A) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_B:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRB0, FLOEN)) {
+				dev_vdbg(&mcde_dev->dev,
+					"Flow (B) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_C0:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRC, C1EN)) {
+				dev_vdbg(&mcde_dev->dev,
+					"Flow (C1) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_C1:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRC, C2EN)) {
+				dev_vdbg(&mcde_dev->dev,
+					"Flow (C2) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ }
+ if (i == MCDE_FLOWEN_MAX_TRIAL)
+ dev_err(&mcde_dev->dev, "%s: channel %d timeout\n",
+ __func__, chnl->id);
+}
+
+static void enable_flow(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (port->type == MCDE_PORTTYPE_DSI)
+ dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS,
+ port->phy.dsi.clk_cont);
+
+ /*
+ * When ROTEN is set, the FLOEN bit will also be set but
+ * the flow has to be started anyway.
+ */
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRA0, FLOEN));
+ mcde_wfld(MCDE_CRA0, ROTEN, chnl->regs.roten);
+ mcde_wfld(MCDE_CRA0, FLOEN, true);
+ break;
+ case MCDE_CHNL_B:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRB0, FLOEN));
+ mcde_wfld(MCDE_CRB0, ROTEN, chnl->regs.roten);
+ mcde_wfld(MCDE_CRB0, FLOEN, true);
+ break;
+ case MCDE_CHNL_C0:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C1EN));
+ mcde_wfld(MCDE_CRC, C1EN, true);
+ break;
+ case MCDE_CHNL_C1:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C2EN));
+ mcde_wfld(MCDE_CRC, C2EN, true);
+ break;
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_RUNNING);
+}
+
+static void work_sleep_function(struct work_struct *ptr)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ if (mcde_trylock(__func__, __LINE__)) {
+ if (mcde_dynamic_power_management)
+ disable_mcde_hw(false);
+ mcde_unlock(__func__, __LINE__);
+ }
+}
+
+/* TODO get from register */
+#define MCDE_CLK_FREQ_MHZ 160
+static u32 get_pkt_div(u32 disp_ppl,
+ struct mcde_port *port,
+ enum mcde_fifo fifo)
+{
+	/*
+	 * Lines can be split into several packets only in DSI CMD mode.
+	 * In DSI VIDEO mode, 1 line = 1 packet.
+	 * DPI is like DSI VIDEO (watermark = 1 line).
+	 * DPI waits for fifo ready only for the first line of the first frame.
+	 * If a line is wider than the fifo size, the watermark can be set to
+	 * either the fifo size or the line size, since the watermark is
+	 * saturated at the fifo size inside MCDE.
+	 */
+ switch (port->type) {
+ case MCDE_PORTTYPE_DSI:
+ if (port->mode == MCDE_PORTMODE_CMD)
+ /* Equivalent of ceil(disp_ppl/fifo_size) */
+ return (disp_ppl - 1) / get_output_fifo_size(fifo) + 1;
+ else
+ return 1;
+ break;
+ case MCDE_PORTTYPE_DPI:
+ return 1;
+ break;
+ default:
+ break;
+ }
+ return 1;
+}
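+/*
+ * Worked example (hypothetical numbers): with an 864 pixel wide display in
+ * DSI CMD mode and a 640 pixel output fifo, get_pkt_div() returns
+ * (864 - 1) / 640 + 1 = 2, so each line is sent as two packets.
+ */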
+
+void update_channel_registers(enum mcde_chnl chnl_id, struct chnl_regs *regs,
+ struct mcde_port *port, enum mcde_fifo fifo,
+ struct mcde_video_mode *video_mode)
+{
+ u8 idx = chnl_id;
+ u32 out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ u32 src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ u32 fifo_wtrmrk = 0;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /*
+ * Select appropriate fifo watermark.
+ * Watermark will be saturated at fifo size inside MCDE.
+ */
+ fifo_wtrmrk = video_mode->xres /
+ get_pkt_div(video_mode->xres, port, fifo);
+
+ dev_vdbg(&mcde_dev->dev, "%s fifo_watermark=%d for chnl_id=%d\n",
+ __func__, fifo_wtrmrk, chnl_id);
+
+ switch (chnl_id) {
+ case MCDE_CHNL_A:
+ mcde_wfld(MCDE_CTRLA, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_B:
+ mcde_wfld(MCDE_CTRLB, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_C0:
+ mcde_wfld(MCDE_CTRLC0, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_C1:
+ mcde_wfld(MCDE_CTRLC1, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ default:
+ break;
+ }
+
+ /* Channel */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ if (port->update_auto_trig) {
+ switch (port->sync_src) {
+ case MCDE_SYNCSRC_TE0:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_VSYNC0;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_OUTPUT;
+ break;
+ case MCDE_SYNCSRC_OFF:
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ break;
+ case MCDE_SYNCSRC_TE1:
+ default:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_VSYNC1;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_OUTPUT;
+ break;
+ case MCDE_SYNCSRC_TE_POLLING:
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_OUTPUT;
+ break;
+ }
+ } else {
+ if (port->sync_src == MCDE_SYNCSRC_TE0) {
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_VSYNC0;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_OUTPUT;
+ }
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+ src_synch = port->update_auto_trig ?
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_OUTPUT :
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ }
+
+ mcde_wreg(MCDE_CHNL0CONF + idx * MCDE_CHNL0CONF_GROUPOFFSET,
+ MCDE_CHNL0CONF_PPL(regs->ppl-1) |
+ MCDE_CHNL0CONF_LPF(regs->lpf-1));
+ mcde_wreg(MCDE_CHNL0STAT + idx * MCDE_CHNL0STAT_GROUPOFFSET,
+ MCDE_CHNL0STAT_CHNLBLBCKGND_EN(false) |
+ MCDE_CHNL0STAT_CHNLRD(true));
+ mcde_wreg(MCDE_CHNL0SYNCHMOD +
+ idx * MCDE_CHNL0SYNCHMOD_GROUPOFFSET,
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH(src_synch) |
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(out_synch_src));
+ mcde_wreg(MCDE_CHNL0BCKGNDCOL + idx * MCDE_CHNL0BCKGNDCOL_GROUPOFFSET,
+ MCDE_CHNL0BCKGNDCOL_B(0) |
+ MCDE_CHNL0BCKGNDCOL_G(0) |
+ MCDE_CHNL0BCKGNDCOL_R(0));
+
+ if (chnl_id == MCDE_CHNL_A || chnl_id == MCDE_CHNL_B) {
+ u32 mcde_crx1;
+ u32 mcde_pal0x;
+ u32 mcde_pal1x;
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_crx1 = MCDE_CRA1;
+ mcde_pal0x = MCDE_PAL0A;
+ mcde_pal1x = MCDE_PAL1A;
+ mcde_wfld(MCDE_CRA0, PALEN, regs->palette_enable);
+ } else {
+ mcde_crx1 = MCDE_CRB1;
+ mcde_pal0x = MCDE_PAL0B;
+ mcde_pal1x = MCDE_PAL1B;
+ mcde_wfld(MCDE_CRB0, PALEN, regs->palette_enable);
+ }
+ mcde_wreg(mcde_crx1,
+ MCDE_CRA1_PCD(regs->pcd) |
+ MCDE_CRA1_CLKSEL(regs->clksel) |
+ MCDE_CRA1_CDWIN(regs->cdwin) |
+ MCDE_CRA1_OUTBPP(bpp2outbpp(regs->bpp)) |
+ MCDE_CRA1_BCD(regs->bcd) |
+ MCDE_CRA1_CLKTYPE(regs->internal_clk));
+ if (regs->palette_enable) {
+ int i;
+ for (i = 0; i < 256; i++) {
+ mcde_wreg(mcde_pal0x,
+ MCDE_PAL0A_GREEN(regs->map_g(i)) |
+ MCDE_PAL0A_BLUE(regs->map_b(i)));
+ mcde_wreg(mcde_pal1x,
+ MCDE_PAL1A_RED(regs->map_r(i)));
+ }
+ }
+ }
+
+ /* Formatter */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ u8 fidx;
+ u32 temp, packet;
+ /*
+ * pkt_div is used to avoid underflow in the output fifo for
+ * large packets.
+ */
+ u32 pkt_div = 1;
+ u32 dsi_delay0 = 0;
+ u32 screen_ppl, screen_lpf;
+
+ if (hardware_version == MCDE_CHIP_VERSION_1_0_4 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4)
+ fidx = port->link;
+ else
+ fidx = 2 * port->link + port->ifc;
+
+ screen_ppl = video_mode->xres;
+ screen_lpf = video_mode->yres;
+
+ pkt_div = get_pkt_div(screen_ppl, port, fifo);
+
+ if (video_mode->interlaced)
+ screen_lpf /= 2;
+
+ /*
+ * pkt_delay_progressive =
+ *	pixelclock * htot / (1E12 / 160E6) / pkt_div
+ */
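+ /*
+ * Assuming pixclock follows the fb convention of picoseconds per
+ * pixel, one MCDE clock cycle at 160 MHz lasts 1E12 / 160E6 = 6250 ps,
+ * so dsi_delay0 below is the line period expressed in MCDE clock
+ * cycles, divided by the number of packets per line.
+ */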
+ dsi_delay0 = (video_mode->pixclock + 1) *
+ (video_mode->xres + video_mode->hbp +
+ video_mode->hfp) /
+ (1000000 / MCDE_CLK_FREQ_MHZ) / pkt_div;
+
+ if ((screen_ppl == SCREEN_PPL_CEA2) &&
+ (screen_lpf == SCREEN_LPF_CEA2))
+ dsi_delay0 += DSI_DELAY0_CEA2_ADD;
+
+ temp = mcde_rreg(MCDE_DSIVID0CONF0 +
+ fidx * MCDE_DSIVID0CONF0_GROUPOFFSET);
+ mcde_wreg(MCDE_DSIVID0CONF0 +
+ fidx * MCDE_DSIVID0CONF0_GROUPOFFSET,
+ (temp & ~MCDE_DSIVID0CONF0_PACKING_MASK) |
+ MCDE_DSIVID0CONF0_PACKING(regs->dsipacking));
+ /* +1 byte for the 8-bit DCS command (CMD8) */
+ packet = ((screen_ppl / pkt_div * regs->bpp) >> 3) + 1;
+ mcde_wreg(MCDE_DSIVID0FRAME +
+ fidx * MCDE_DSIVID0FRAME_GROUPOFFSET,
+ MCDE_DSIVID0FRAME_FRAME(packet * pkt_div * screen_lpf));
+ mcde_wreg(MCDE_DSIVID0PKT + fidx * MCDE_DSIVID0PKT_GROUPOFFSET,
+ MCDE_DSIVID0PKT_PACKET(packet));
+ mcde_wreg(MCDE_DSIVID0SYNC +
+ fidx * MCDE_DSIVID0SYNC_GROUPOFFSET,
+ MCDE_DSIVID0SYNC_SW(0) |
+ MCDE_DSIVID0SYNC_DMA(0));
+ mcde_wreg(MCDE_DSIVID0CMDW +
+ fidx * MCDE_DSIVID0CMDW_GROUPOFFSET,
+ MCDE_DSIVID0CMDW_CMDW_START(DCS_CMD_WRITE_START) |
+ MCDE_DSIVID0CMDW_CMDW_CONTINUE(DCS_CMD_WRITE_CONTINUE));
+ mcde_wreg(MCDE_DSIVID0DELAY0 +
+ fidx * MCDE_DSIVID0DELAY0_GROUPOFFSET,
+ MCDE_DSIVID0DELAY0_INTPKTDEL(dsi_delay0));
+ mcde_wreg(MCDE_DSIVID0DELAY1 +
+ fidx * MCDE_DSIVID0DELAY1_GROUPOFFSET,
+ MCDE_DSIVID0DELAY1_TEREQDEL(0) |
+ MCDE_DSIVID0DELAY1_FRAMESTARTDEL(0));
+ } else if (port->type == MCDE_PORTTYPE_DPI &&
+ !port->phy.dpi.tv_mode) {
+ /* DPI LCD Mode */
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_wreg(MCDE_SYNCHCONFA,
+ MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFA_HWREQVCNT(
+ video_mode->yres - 1) |
+ MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFA_SWINTVCNT(
+ video_mode->yres - 1));
+ } else if (chnl_id == MCDE_CHNL_B) {
+ mcde_wreg(MCDE_SYNCHCONFB,
+ MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFB_HWREQVCNT(
+ video_mode->yres - 1) |
+ MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFB_SWINTVCNT(
+ video_mode->yres - 1));
+ }
+ }
+
+ if (regs->roten) {
+ mcde_wreg(MCDE_ROTADD0A + chnl_id * MCDE_ROTADD0A_GROUPOFFSET,
+ regs->rotbuf1);
+ mcde_wreg(MCDE_ROTADD1A + chnl_id * MCDE_ROTADD1A_GROUPOFFSET,
+ regs->rotbuf2);
+ mcde_wreg(MCDE_ROTACONF + chnl_id * MCDE_ROTACONF_GROUPOFFSET,
+ MCDE_ROTACONF_ROTBURSTSIZE_ENUM(8W) |
+ MCDE_ROTACONF_ROTBURSTSIZE_HW(1) |
+ MCDE_ROTACONF_ROTDIR(regs->rotdir) |
+ MCDE_ROTACONF_STRIP_WIDTH_ENUM(16PIX) |
+ MCDE_ROTACONF_RD_MAXOUT_ENUM(4_REQ) |
+ MCDE_ROTACONF_WR_MAXOUT_ENUM(8_REQ));
+ }
+
+ /* Blending */
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_wfld(MCDE_CRA0, BLENDEN, regs->blend_en);
+ mcde_wfld(MCDE_CRA0, BLENDCTRL, regs->blend_ctrl);
+ mcde_wfld(MCDE_CRA0, ALPHABLEND, regs->alpha_blend);
+ } else if (chnl_id == MCDE_CHNL_B) {
+ mcde_wfld(MCDE_CRB0, BLENDEN, regs->blend_en);
+ mcde_wfld(MCDE_CRB0, BLENDCTRL, regs->blend_ctrl);
+ mcde_wfld(MCDE_CRB0, ALPHABLEND, regs->alpha_blend);
+ }
+
+ dev_vdbg(&mcde_dev->dev, "Channel registers setup, chnl=%d\n", chnl_id);
+ regs->dirty = false;
+}
+
+static int enable_mcde_hw(void)
+{
+ int ret;
+ int i;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
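+ /*
+ * (Re)arm the inactivity watchdog; work_sleep_function() powers the
+ * hardware down again if nothing keeps it busy for
+ * MCDE_SLEEP_WATCHDOG ms.
+ */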
+ cancel_delayed_work(&hw_timeout_work);
+ schedule_delayed_work(&hw_timeout_work,
+ msecs_to_jiffies(MCDE_SLEEP_WATCHDOG));
+
+ for (i = 0; i < num_channels; i++) {
+ struct mcde_chnl_state *chnl = &channels[i];
+ if (chnl->state == CHNLSTATE_SUSPEND) {
+ /* Mark all registers as dirty */
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+ chnl->ovly0->regs.dirty = true;
+ chnl->ovly0->regs.dirty_buf = true;
+ if (chnl->ovly1) {
+ chnl->ovly1->regs.dirty = true;
+ chnl->ovly1->regs.dirty_buf = true;
+ }
+ chnl->regs.dirty = true;
+ chnl->col_regs.dirty = true;
+ chnl->tv_regs.dirty = true;
+ atomic_set(&chnl->vcmp_cnt, 0);
+ }
+ }
+
+ if (mcde_is_enabled) {
+ dev_vdbg(&mcde_dev->dev, "%s - already enabled\n", __func__);
+ return 0;
+ }
+
+ enable_clocks_and_power(mcde_dev);
+
+ ret = request_irq(mcde_irq, mcde_irq_handler, 0, "mcde",
+ &mcde_dev->dev);
+ if (ret) {
+ dev_dbg(&mcde_dev->dev, "Failed to request irq (irq=%d)\n",
+ mcde_irq);
+ cancel_delayed_work(&hw_timeout_work);
+ return -EINVAL;
+ }
+
+ update_mcde_registers();
+
+ dev_vdbg(&mcde_dev->dev, "%s - enable done\n", __func__);
+
+ mcde_is_enabled = true;
+ return 0;
+}
+
+/* DSI */
+static int mcde_dsi_direct_cmd_write(struct mcde_chnl_state *chnl,
+ bool dcs, u8 cmd, u8 *data, int len)
+{
+ int i, ret = 0;
+ u32 wrdat[4] = { 0, 0, 0, 0 };
+ u32 settings;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 counter = DSI_WRITE_CMD_TIMEOUT;
+
+ if (len > MCDE_MAX_DSI_DIRECT_CMD_WRITE ||
+ chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ _mcde_chnl_enable(chnl);
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE);
+
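+ /*
+ * Pack the payload little-endian into the four 32-bit WRDAT words;
+ * for DCS writes the command byte occupies byte 0 of WRDAT0.
+ */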
+ if (dcs) {
+ wrdat[0] = cmd;
+ for (i = 1; i <= len; i++)
+ wrdat[i>>2] |= ((u32)data[i-1] << ((i & 3) * 8));
+ } else {
+ /* no explicit cmd byte for generic_write, only params */
+ for (i = 0; i < len; i++)
+ wrdat[i>>2] |= ((u32)data[i] << ((i & 3) * 8));
+ }
+
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(len > 1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(len+1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true);
+ if (dcs) {
+ if (len == 0)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_SHORT_WRITE_0);
+ else if (len == 1)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_SHORT_WRITE_1);
+ else
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_LONG_WRITE);
+ } else {
+ if (len == 0)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_0);
+ else if (len == 1)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_1);
+ else if (len == 2)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_2);
+ else
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_LONG_WRITE);
+ }
+
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, wrdat[0]);
+ if (len > 3)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT1, wrdat[1]);
+ if (len > 7)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT2, wrdat[2]);
+ if (len > 11)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT3, wrdat[3]);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ /* loop will normally run zero or one time until WRITE_COMPLETED */
+ while (!dsi_rfld(link, DSI_DIRECT_CMD_STS, WRITE_COMPLETED)
+ && --counter)
+ cpu_relax();
+
+ if (!counter) {
+ dev_err(&mcde_dev->dev,
+ "%s: DSI write cmd 0x%x timeout on DSI link %u!\n",
+ __func__, cmd, link);
+ ret = -ETIME;
+ } else {
+ /* inform if >10% of the timeout budget elapsed before completion */
+ if (counter < (DSI_WRITE_CMD_TIMEOUT-DSI_WRITE_CMD_TIMEOUT/10))
+ dev_vdbg(&mcde_dev->dev,
+ "%s: %u loops for DSI command %x completion\n",
+ __func__, (DSI_WRITE_CMD_TIMEOUT - counter),
+ cmd);
+
+ dev_vdbg(&mcde_dev->dev, "DSI Write ok %x error %x\n",
+ dsi_rreg(link, DSI_DIRECT_CMD_STS_FLAG),
+ dsi_rreg(link, DSI_CMD_MODE_STS_FLAG));
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return ret;
+}
+
+int mcde_dsi_generic_write(struct mcde_chnl_state *chnl, u8 *para, int len)
+{
+ return mcde_dsi_direct_cmd_write(chnl, false, 0, para, len);
+}
+
+int mcde_dsi_dcs_write(struct mcde_chnl_state *chnl, u8 cmd, u8 *data, int len)
+{
+ return mcde_dsi_direct_cmd_write(chnl, true, cmd, data, len);
+}
+
+int mcde_dsi_dcs_read(struct mcde_chnl_state *chnl,
+ u8 cmd, u32 *data, int *len)
+{
+ int ret = 0;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 settings;
+ bool ok = false;
+ bool error, ack_with_err;
+ u8 nbr_of_retries = DSI_READ_NBR_OF_RETRIES;
+
+ if (*len > MCDE_MAX_DCS_READ || chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ _mcde_chnl_enable(chnl);
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_READ);
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(READ) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_READ);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, cmd);
+
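+ /*
+ * Send the read request and poll the status, waiting DSI_READ_DELAY
+ * us between checks for at most DSI_READ_TIMEOUT tries; the request
+ * is retried (up to DSI_READ_NBR_OF_RETRIES attempts) while the
+ * peripheral acknowledges with an error.
+ */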
+ do {
+ u8 wait = DSI_READ_TIMEOUT;
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_RD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ while (wait-- && !(error = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ READ_COMPLETED_WITH_ERR)) &&
+ !(ok = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ READ_COMPLETED)))
+ udelay(DSI_READ_DELAY);
+
+ ack_with_err = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ ACKNOWLEDGE_WITH_ERR_RECEIVED);
+ if (ack_with_err)
+ dev_warn(&mcde_dev->dev,
+ "DCS Acknowledge Error Report %.4X\n",
+ dsi_rfld(link, DSI_DIRECT_CMD_STS, ACK_VAL));
+ } while (--nbr_of_retries && ack_with_err);
+
+ if (ok) {
+ int rdsize;
+ u32 rddat;
+
+ rdsize = dsi_rfld(link, DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE);
+ rddat = dsi_rreg(link, DSI_DIRECT_CMD_RDDAT);
+ if (rdsize < *len)
+ dev_warn(&mcde_dev->dev, "DCS incomplete read %d<%d"
+ " (%.8X)\n", rdsize, *len, rddat);
+ *len = min(*len, rdsize);
+ memcpy(data, &rddat, *len);
+ } else {
+ dev_err(&mcde_dev->dev, "DCS read failed, err=%d, sts=%X\n",
+ error, dsi_rreg(link, DSI_DIRECT_CMD_STS));
+ ret = -EIO;
+ }
+
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return ret;
+}
+
+/*
+ * Set Maximum Return Packet Size is a command that specifies the
+ * maximum size of the payload transmitted from the peripheral back
+ * to the host processor.
+ *
+ * During the power-on or reset sequence, the Maximum Return Packet
+ * Size is set to a default value of one. To be able to use
+ * mcde_dsi_dcs_read() for reading more than one byte at a time, the
+ * host processor should set this parameter to the desired value in
+ * the initialization routine, before commencing normal operation.
+ */
+int mcde_dsi_set_max_pkt_size(struct mcde_chnl_state *chnl)
+{
+ u32 settings;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+
+ if (chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EIO;
+ }
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE);
+
+ /*
+ * Set Maximum Return Packet Size is a two-byte command packet
+ * that specifies the maximum size of the payload as a u16 value.
+ * The byte order is: MaxSize LSB, MaxSize MSB.
+ */
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ SET_MAX_PKT_SIZE);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, MCDE_MAX_DCS_READ);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+
+static void dsi_te_poll_req(struct mcde_chnl_state *chnl)
+{
+ u8 lnk = chnl->port.link;
+ const struct mcde_port *port = &chnl->port;
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, false);
+ if (port->ifc == 0)
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, true);
+ if (port->ifc == 1)
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN, true);
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, true);
+}
+
+static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl,
+ unsigned int timeout)
+{
+ mod_timer(&chnl->dsi_te_timer,
+ jiffies +
+ msecs_to_jiffies(timeout));
+}
+
+static void dsi_te_timer_function(unsigned long arg)
+{
+ struct mcde_chnl_state *chnl;
+ u8 lnk;
+
+ if (arg >= num_channels) {
+ dev_err(&mcde_dev->dev, "%s invalid arg:%ld\n", __func__, arg);
+ return;
+ }
+
+ chnl = &channels[arg];
+
+ if (mcde_is_enabled && chnl->enabled && chnl->formatter_updated) {
+ lnk = chnl->port.link;
+ /* No TE answer; force stop */
+ dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, true);
+ udelay(20);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, false);
+ dev_info(&mcde_dev->dev, "DSI%d force stop\n", lnk);
+ dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT);
+ } else {
+ dev_info(&mcde_dev->dev, "1:DSI force stop\n");
+ }
+}
+
+static void dsi_te_request(struct mcde_chnl_state *chnl)
+{
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 settings;
+
+ dev_vdbg(&mcde_dev->dev, "Request BTA TE, chnl=%d\n",
+ chnl->id);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_WAIT_TE);
+
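+ /*
+ * Arm the TE_RECEIVED and ERR_NO_TE interrupts, then issue a
+ * set_tear_on DCS short write tagged as a TE request (CMD_NAT
+ * TE_REQ); the TE trigger is expected back from the panel over BTA.
+ */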
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true);
+ dsi_wfld(link, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF);
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(TE_REQ) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_SHORT_WRITE_1);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, DCS_CMD_SET_TEAR_ON);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true));
+ dsi_wfld(link, DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, true);
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR,
+ DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true));
+ dsi_wfld(link, DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, true);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+}
+
+/* MCDE channels */
+static struct mcde_chnl_state *_mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port)
+{
+ int i;
+ struct mcde_chnl_state *chnl = NULL;
+ enum mcde_chnl_path path;
+ const struct chnl_config *cfg = NULL;
+
+ static struct mcde_col_transform ycbcr_2_rgb = {
+ /* Note that in MCDE YUV 422 pixels come as VYU pixels */
+ .matrix = {
+ {0xff30, 0x012a, 0xff9c},
+ {0x0000, 0x012a, 0x0204},
+ {0x0199, 0x012a, 0x0000},
+ },
+ .offset = {0x0088, 0xfeeb, 0xff21},
+ };
+
+ static struct mcde_col_transform rgb_2_ycbcr = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x0010, 0x0080, 0x0080},
+ };
+
+ /* Allocate channel */
+ for (i = 0; i < num_channels; i++) {
+ if (chnl_id == channels[i].id)
+ chnl = &channels[i];
+ }
+ if (!chnl) {
+ dev_dbg(&mcde_dev->dev, "Invalid channel, chnl=%d\n", chnl_id);
+ return ERR_PTR(-EINVAL);
+ }
+ if (chnl->reserved) {
+ dev_dbg(&mcde_dev->dev, "Channel in use, chnl=%d\n", chnl_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ if (hardware_version == MCDE_CHIP_VERSION_3_0_5) {
+ path = MCDE_CHNLPATH(chnl->id, fifo, port->type,
+ port->ifc, port->link);
+ for (i = 0; i < ARRAY_SIZE(chnl_configs); i++)
+ if (chnl_configs[i].path == path) {
+ cfg = &chnl_configs[i];
+ break;
+ }
+ if (cfg == NULL) {
+ dev_dbg(&mcde_dev->dev, "Invalid config, chnl=%d,"
+ " path=0x%.8X\n", chnl_id, path);
+ return ERR_PTR(-EINVAL);
+ } else {
+ dev_info(&mcde_dev->dev, "Config, chnl=%d,"
+ " path=0x%.8X\n", chnl_id, path);
+ }
+ }
+ /* TODO: verify that cfg is ok to activate (check other chnl cfgs) */
+
+ chnl->cfg = cfg;
+ chnl->port = *port;
+ chnl->fifo = fifo;
+ chnl->formatter_updated = false;
+ chnl->ycbcr_2_rgb = ycbcr_2_rgb;
+ chnl->rgb_2_ycbcr = rgb_2_ycbcr;
+
+ chnl->blend_en = true;
+ chnl->blend_ctrl = MCDE_CRA0_BLENDCTRL_SOURCE;
+ chnl->alpha_blend = 0xFF;
+
+ _mcde_chnl_apply(chnl);
+ chnl->reserved = true;
+
+ if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ if (chnl->port.phy.dpi.tv_mode)
+ chnl->vcmp_per_field = true;
+ }
+
+ return chnl;
+}
+
+static int _mcde_chnl_apply(struct mcde_chnl_state *chnl)
+{
+ bool roten = false;
+ u8 rotdir = 0;
+
+ if (chnl->rotation == MCDE_DISPLAY_ROT_90_CCW) {
+ roten = true;
+ rotdir = MCDE_ROTACONF_ROTDIR_CCW;
+ } else if (chnl->rotation == MCDE_DISPLAY_ROT_90_CW) {
+ roten = true;
+ rotdir = MCDE_ROTACONF_ROTDIR_CW;
+ }
+ /* REVIEW: 180 deg? */
+
+ chnl->regs.bpp = portfmt2bpp(chnl->port.pixel_format);
+ chnl->regs.synchronized_update = chnl->synchronized_update;
+ chnl->regs.roten = roten;
+ chnl->regs.rotdir = rotdir;
+ chnl->regs.rotbuf1 = chnl->rotbuf1;
+ chnl->regs.rotbuf2 = chnl->rotbuf2;
+ chnl->regs.palette_enable = chnl->palette_enable;
+ chnl->regs.map_r = chnl->map_r;
+ chnl->regs.map_g = chnl->map_g;
+ chnl->regs.map_b = chnl->map_b;
+ if (chnl->port.type == MCDE_PORTTYPE_DSI) {
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_166MHZ;
+ chnl->regs.dsipacking =
+ portfmt2dsipacking(chnl->port.pixel_format);
+ } else if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ if (chnl->port.phy.dpi.tv_mode) {
+ chnl->regs.internal_clk = false;
+ chnl->regs.bcd = true;
+ if (chnl->id == MCDE_CHNL_A)
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_EXT_TV1;
+ else
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_EXT_TV2;
+ } else {
+ chnl->regs.internal_clk = true;
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_LCD;
+ chnl->regs.cdwin =
+ portfmt2cdwin(chnl->port.pixel_format);
+ chnl->regs.bcd = (chnl->port.phy.dpi.clock_div < 2);
+ if (!chnl->regs.bcd)
+ chnl->regs.pcd =
+ chnl->port.phy.dpi.clock_div - 2;
+ }
+ dpi_video_mode_apply(chnl);
+ }
+
+ chnl->regs.blend_ctrl = chnl->blend_ctrl;
+ chnl->regs.blend_en = chnl->blend_en;
+ chnl->regs.alpha_blend = chnl->alpha_blend;
+
+ chnl->regs.dirty = true;
+
+ dev_vdbg(&mcde_dev->dev, "Channel applied, chnl=%d\n", chnl->id);
+ return 0;
+}
+
+static void setup_channel(struct mcde_chnl_state *chnl)
+{
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+
+ if (chnl->port.type == MCDE_PORTTYPE_DPI && chnl->tv_regs.dirty)
+ update_dpi_registers(chnl->id, &chnl->tv_regs);
+ if ((chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B) &&
+ chnl->col_regs.dirty)
+ update_col_registers(chnl->id, &chnl->col_regs);
+ if (chnl->regs.dirty)
+ update_channel_registers(chnl->id, &chnl->regs, &chnl->port,
+ chnl->fifo, &chnl->vmode);
+}
+
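+/*
+ * Continuous (auto-triggered) update: if the flow is already running,
+ * callers that are not triple-buffered wait for the next VCMP;
+ * otherwise the selected TE sync source is armed and the flow is
+ * started.
+ */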
+static void chnl_update_continous(struct mcde_chnl_state *chnl,
+ bool tripple_buffer)
+{
+ if (chnl->state == CHNLSTATE_RUNNING) {
+ if (!tripple_buffer)
+ wait_for_vcmp(chnl);
+ return;
+ }
+
+ setup_channel(chnl);
+ if (chnl->port.sync_src == MCDE_SYNCSRC_TE0) {
+ mcde_wfld(MCDE_CRC, SYCEN0, true);
+ } else if (chnl->port.sync_src == MCDE_SYNCSRC_TE1) {
+ if (hardware_version == MCDE_CHIP_VERSION_3_0_8 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4) {
+ mcde_wfld(MCDE_VSCRC1, VSSEL, 1);
+ mcde_wfld(MCDE_CRC, SYCEN1, true);
+ } else {
+ mcde_wfld(MCDE_VSCRC1, VSSEL, 0);
+ mcde_wfld(MCDE_CRC, SYCEN0, true);
+ }
+ }
+
+ enable_flow(chnl);
+}
+
+static void chnl_update_non_continous(struct mcde_chnl_state *chnl)
+{
+ /* Commit settings to registers */
+ setup_channel(chnl);
+
+ if (chnl->regs.synchronized_update &&
+ chnl->power_mode == MCDE_DISPLAY_PM_ON) {
+ if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ chnl->port.sync_src == MCDE_SYNCSRC_BTA)
+ dsi_te_request(chnl);
+ } else {
+ do_softwaretrig(chnl);
+ dev_vdbg(&mcde_dev->dev, "Channel update (no sync), chnl=%d\n",
+ chnl->id);
+ }
+}
+
+static void chnl_update_overlay(struct mcde_chnl_state *chnl,
+ struct mcde_ovly_state *ovly)
+{
+ if (!ovly)
+ return;
+
+ if (ovly->regs.dirty_buf) {
+ update_overlay_registers_on_the_fly(ovly->idx, &ovly->regs);
+ mcde_debugfs_overlay_update(chnl->id, ovly != chnl->ovly0);
+ }
+ if (ovly->regs.dirty) {
+ chnl_ovly_pixel_format_apply(chnl, ovly);
+ update_overlay_registers(ovly->idx, &ovly->regs, &chnl->port,
+ chnl->fifo, chnl->regs.x, chnl->regs.y,
+ chnl->regs.ppl, chnl->regs.lpf, ovly->stride,
+ chnl->vmode.interlaced, chnl->rotation);
+ if (chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B)
+ update_col_registers(chnl->id, &chnl->col_regs);
+ }
+}
+
+static int _mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /* TODO: lock & make wait->trig async */
+ if (!chnl->enabled || !update_area
+ || (update_area->w == 0 && update_area->h == 0)) {
+ return -EINVAL;
+ }
+
+ if (chnl->port.update_auto_trig && tripple_buffer)
+ wait_for_vcmp(chnl);
+
+ chnl->regs.x = update_area->x;
+ chnl->regs.y = update_area->y;
+ /* TODO Crop against video_mode.xres and video_mode.yres */
+ chnl->regs.ppl = update_area->w;
+ chnl->regs.lpf = update_area->h;
+ if (chnl->port.type == MCDE_PORTTYPE_DPI &&
+ chnl->port.phy.dpi.tv_mode) {
+ /* subtract border */
+ chnl->regs.ppl -= chnl->tv_regs.dho + chnl->tv_regs.alw;
+ /* subtract double borders, i.e. for both fields */
+ chnl->regs.lpf -= 2 * (chnl->tv_regs.dvo + chnl->tv_regs.bsl);
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ chnl->vmode.interlaced)
+ chnl->regs.lpf /= 2;
+
+ chnl_update_overlay(chnl, chnl->ovly0);
+ chnl_update_overlay(chnl, chnl->ovly1);
+
+ if (chnl->port.update_auto_trig)
+ chnl_update_continous(chnl, tripple_buffer);
+ else
+ chnl_update_non_continous(chnl);
+
+ dev_vdbg(&mcde_dev->dev, "Channel updated, chnl=%d\n", chnl->id);
+ mcde_debugfs_channel_update(chnl->id);
+ return 0;
+}
+
+static int _mcde_chnl_enable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl->enabled = true;
+ return 0;
+}
+
+/* API entry points */
+/* MCDE channels */
+struct mcde_chnl_state *mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port)
+{
+ struct mcde_chnl_state *chnl;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl = _mcde_chnl_get(chnl_id, fifo, port);
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return chnl;
+}
+
+int mcde_chnl_set_pixel_format(struct mcde_chnl_state *chnl,
+ enum mcde_port_pix_fmt pix_fmt)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+ chnl->port.pixel_format = pix_fmt;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_set_palette(struct mcde_chnl_state *chnl,
+ struct mcde_palette_table *palette)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+ if (palette != NULL) {
+ chnl->map_r = palette->map_col_ch0;
+ chnl->map_g = palette->map_col_ch1;
+ chnl->map_b = palette->map_col_ch2;
+ chnl->palette_enable = true;
+ } else {
+ chnl->map_r = NULL;
+ chnl->map_g = NULL;
+ chnl->map_b = NULL;
+ chnl->palette_enable = false;
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+ return 0;
+}
+
+void mcde_chnl_set_col_convert(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform,
+ enum mcde_col_convert convert)
+{
+ switch (convert) {
+ case MCDE_CONVERT_RGB_2_YCBCR:
+ memcpy(&chnl->rgb_2_ycbcr, transform,
+ sizeof(struct mcde_col_transform));
+ /* force update: */
+ if (chnl->transform == &chnl->rgb_2_ycbcr) {
+ chnl->transform = NULL;
+ chnl->ovly0->dirty = true;
+ chnl->ovly1->dirty = true;
+ }
+ break;
+ case MCDE_CONVERT_YCBCR_2_RGB:
+ memcpy(&chnl->ycbcr_2_rgb, transform,
+ sizeof(struct mcde_col_transform));
+ /* force update: */
+ if (chnl->transform == &chnl->ycbcr_2_rgb) {
+ chnl->transform = NULL;
+ chnl->ovly0->dirty = true;
+ chnl->ovly1->dirty = true;
+ }
+ break;
+ default:
+ /* Trivial transforms are handled internally */
+ dev_warn(&mcde_dev->dev,
+ "%s: unsupported col convert\n", __func__);
+ break;
+ }
+}
+
+int mcde_chnl_set_video_mode(struct mcde_chnl_state *chnl,
+ struct mcde_video_mode *vmode)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl == NULL || vmode == NULL)
+ return -EINVAL;
+
+ chnl->vmode = *vmode;
+
+ chnl->ovly0->dirty = true;
+ if (chnl->ovly1)
+ chnl->ovly1->dirty = true;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL(mcde_chnl_set_video_mode);
+
+int mcde_chnl_set_rotation(struct mcde_chnl_state *chnl,
+ enum mcde_display_rotation rotation, u32 rotbuf1, u32 rotbuf2)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ if (chnl->id != MCDE_CHNL_A && chnl->id != MCDE_CHNL_B)
+ return -EINVAL;
+
+ chnl->rotation = rotation;
+ chnl->rotbuf1 = rotbuf1;
+ chnl->rotbuf2 = rotbuf2;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_enable_synchronized_update(struct mcde_chnl_state *chnl,
+ bool enable)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ chnl->synchronized_update = enable;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_set_power_mode(struct mcde_chnl_state *chnl,
+ enum mcde_display_power_mode power_mode)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ chnl->power_mode = power_mode;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_apply(struct mcde_chnl_state *chnl)
+{
+ int ret;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+ ret = _mcde_chnl_apply(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+int mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer)
+{
+ int ret;
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+ enable_mcde_hw();
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ if (chnl->regs.roten && !chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_enable(regulator_esram_epod));
+ chnl->esram_is_enabled = true;
+ }
+
+ ret = _mcde_chnl_update(chnl, update_area, tripple_buffer);
+
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+void mcde_chnl_put(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->enabled) {
+ stop_channel(chnl);
+ cancel_delayed_work(&hw_timeout_work);
+ disable_mcde_hw(false);
+ chnl->enabled = false;
+ }
+
+ chnl->reserved = false;
+ if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ if (chnl->port.phy.dpi.tv_mode) {
+ chnl->vcmp_per_field = false;
+ chnl->even_vcmp = false;
+ }
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_stop_flow(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ if (mcde_is_enabled && chnl->enabled)
+ stop_channel(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_enable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ _mcde_chnl_enable(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_disable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ cancel_delayed_work(&hw_timeout_work);
+ /* The channel must be stopped before it is disabled */
+ WARN_ON_ONCE(chnl->state == CHNLSTATE_RUNNING);
+ disable_mcde_hw(false);
+ chnl->enabled = false;
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+/* MCDE overlays */
+struct mcde_ovly_state *mcde_ovly_get(struct mcde_chnl_state *chnl)
+{
+ struct mcde_ovly_state *ovly;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return ERR_PTR(-EINVAL);
+
+ if (!chnl->ovly0->inuse)
+ ovly = chnl->ovly0;
+ else if (chnl->ovly1 && !chnl->ovly1->inuse)
+ ovly = chnl->ovly1;
+ else
+ ovly = ERR_PTR(-EBUSY);
+
+ if (!IS_ERR(ovly)) {
+ ovly->inuse = true;
+ ovly->paddr = 0;
+ ovly->stride = 0;
+ ovly->pix_fmt = MCDE_OVLYPIXFMT_RGB565;
+ ovly->src_x = 0;
+ ovly->src_y = 0;
+ ovly->dst_x = 0;
+ ovly->dst_y = 0;
+ ovly->dst_z = 0;
+ ovly->w = 0;
+ ovly->h = 0;
+ ovly->alpha_value = 0xFF;
+ ovly->alpha_source = MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA;
+ ovly->dirty = true;
+ mcde_ovly_apply(ovly);
+ }
+
+ return ovly;
+}
+
+void mcde_ovly_put(struct mcde_ovly_state *ovly)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!ovly->inuse)
+ return;
+ if (ovly->regs.enabled) {
+ ovly->paddr = 0;
+ ovly->dirty = true;
+ mcde_ovly_apply(ovly);/* REVIEW: API call calling API call! */
+ }
+ ovly->inuse = false;
+}
+
+void mcde_ovly_set_source_buf(struct mcde_ovly_state *ovly, u32 paddr)
+{
+ if (!ovly->inuse)
+ return;
+
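+ /*
+ * Only an enable/disable transition (paddr changing from or to zero)
+ * dirties the full overlay configuration; a plain buffer swap only
+ * needs the base address update via dirty_buf.
+ */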
+ ovly->dirty = paddr == 0 || ovly->paddr == 0;
+ ovly->dirty_buf = true;
+
+ ovly->paddr = paddr;
+}
+
+void mcde_ovly_set_source_info(struct mcde_ovly_state *ovly,
+ u32 stride, enum mcde_ovly_pix_fmt pix_fmt)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->stride = stride;
+ ovly->pix_fmt = pix_fmt;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_set_source_area(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->src_x = x;
+ ovly->src_y = y;
+ ovly->w = w;
+ ovly->h = h;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_set_dest_pos(struct mcde_ovly_state *ovly, u16 x, u16 y, u8 z)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->dst_x = x;
+ ovly->dst_y = y;
+ ovly->dst_z = z;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_apply(struct mcde_ovly_state *ovly)
+{
+ if (!ovly->inuse)
+ return;
+
+ mcde_lock(__func__, __LINE__);
+
+ if (ovly->dirty || ovly->dirty_buf) {
+ ovly->regs.ch_id = ovly->chnl->id;
+ ovly->regs.enabled = ovly->paddr != 0;
+ ovly->regs.baseaddress0 = ovly->paddr;
+ ovly->regs.baseaddress1 =
+ ovly->regs.baseaddress0 + ovly->stride;
+ ovly->regs.dirty_buf = true;
+ ovly->dirty_buf = false;
+ }
+ if (!ovly->dirty) {
+ mcde_unlock(__func__, __LINE__);
+ return;
+ }
+
+ switch (ovly->pix_fmt) {/* REVIEW: Extract to table */
+ case MCDE_OVLYPIXFMT_RGB565:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB565;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA5551:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_IRGB1555;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA4444:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB4444;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_RGB888:
+ ovly->regs.bits_per_pixel = 24;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBX8888:
+ ovly->regs.bits_per_pixel = 32;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_XRGB8888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = true;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA8888:
+ ovly->regs.bits_per_pixel = 32;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB8888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_YCbCr422:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_YCBCR422;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ default:
+ break;
+ }
+
+ ovly->regs.ppl = ovly->w;
+ ovly->regs.lpf = ovly->h;
+ ovly->regs.cropx = ovly->src_x;
+ ovly->regs.cropy = ovly->src_y;
+ ovly->regs.xpos = ovly->dst_x;
+ ovly->regs.ypos = ovly->dst_y;
+ ovly->regs.z = ovly->dst_z > 0; /* 0 or 1 */
+ ovly->regs.col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ ovly->regs.alpha_source = ovly->alpha_source;
+ ovly->regs.alpha_value = ovly->alpha_value;
+
+ ovly->regs.dirty = true;
+ ovly->dirty = false;
+
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "Overlay applied, idx=%d chnl=%d\n",
+ ovly->idx, ovly->chnl->id);
+}
+
+static int init_clocks_and_power(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->regulator_mcde_epod_id) {
+ regulator_mcde_epod = regulator_get(&pdev->dev,
+ pdata->regulator_mcde_epod_id);
+ if (IS_ERR(regulator_mcde_epod)) {
+ ret = PTR_ERR(regulator_mcde_epod);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_mcde_epod_id);
+ regulator_mcde_epod = NULL;
+ return ret;
+ }
+ } else {
+ dev_warn(&pdev->dev, "%s: No mcde regulator id supplied\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (pdata->regulator_esram_epod_id) {
+ regulator_esram_epod = regulator_get(&pdev->dev,
+ pdata->regulator_esram_epod_id);
+ if (IS_ERR(regulator_esram_epod)) {
+ ret = PTR_ERR(regulator_esram_epod);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_esram_epod_id);
+ regulator_esram_epod = NULL;
+ goto regulator_esram_err;
+ }
+ } else {
+ dev_warn(&pdev->dev, "%s: No esram regulator id supplied\n",
+ __func__);
+ }
+
+ if (pdata->regulator_vana_id) {
+ regulator_vana = regulator_get(&pdev->dev,
+ pdata->regulator_vana_id);
+ if (IS_ERR(regulator_vana)) {
+ ret = PTR_ERR(regulator_vana);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_vana_id);
+ regulator_vana = NULL;
+ goto regulator_vana_err;
+ }
+ } else {
+ dev_dbg(&pdev->dev, "%s: No vana regulator id supplied\n",
+ __func__);
+ }
+
+ /*
+ * The DSI, DSI LP and DPI clocks are optional: some platforms have
+ * no DSI or DPI support, so failing to get them is not an error.
+ */
+ clock_dsi = clk_get(&pdev->dev, pdata->clock_dsi_id);
+ if (IS_ERR(clock_dsi))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dsi_id);
+
+ clock_dsi_lp = clk_get(&pdev->dev, pdata->clock_dsi_lp_id);
+ if (IS_ERR(clock_dsi_lp))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dsi_lp_id);
+
+ clock_dpi = clk_get(&pdev->dev, pdata->clock_dpi_id);
+ if (IS_ERR(clock_dpi))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dpi_id);
+
+ clock_mcde = clk_get(&pdev->dev, pdata->clock_mcde_id);
+ if (IS_ERR(clock_mcde)) {
+ ret = PTR_ERR(clock_mcde);
+ dev_warn(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_mcde_id);
+ goto clk_mcde_err;
+ }
+
+ return ret;
+
+clk_mcde_err:
+ clk_put(clock_dpi);
+ clk_put(clock_dsi_lp);
+ clk_put(clock_dsi);
+
+ if (regulator_vana)
+ regulator_put(regulator_vana);
+regulator_vana_err:
+ if (regulator_esram_epod)
+ regulator_put(regulator_esram_epod);
+regulator_esram_err:
+ regulator_put(regulator_mcde_epod);
+ return ret;
+}
+
+static void remove_clocks_and_power(struct platform_device *pdev)
+{
+ /* REVIEW: Release resources only if they exist */
+ /* REVIEW: Make sure MCDE is done before removing clocks and power */
+ clk_put(clock_dpi);
+ clk_put(clock_dsi_lp);
+ clk_put(clock_dsi);
+ clk_put(clock_mcde);
+ if (regulator_vana)
+ regulator_put(regulator_vana);
+ regulator_put(regulator_mcde_epod);
+ regulator_put(regulator_esram_epod);
+}
+
+static void probe_hw(void)
+{
+ int i;
+ u8 major_version;
+ u8 minor_version;
+ u8 development_version;
+
+ dev_info(&mcde_dev->dev, "Probe HW\n");
+
+ /* Get MCDE HW version */
+ regulator_enable(regulator_mcde_epod);
+ clk_enable(clock_mcde);
+ major_version = (u8)mcde_rfld(MCDE_PID, MAJOR_VERSION);
+ minor_version = (u8)mcde_rfld(MCDE_PID, MINOR_VERSION);
+ development_version = (u8)mcde_rfld(MCDE_PID, DEVELOPMENT_VERSION);
+
+ dev_info(&mcde_dev->dev, "MCDE HW revision %u.%u.%u.%u\n",
+ major_version, minor_version, development_version,
+ mcde_rfld(MCDE_PID, METALFIX_VERSION));
+
+ clk_disable(clock_mcde);
+ regulator_disable(regulator_mcde_epod);
+
+ if (major_version == 3 && minor_version == 0 &&
+ development_version >= 8) {
+ hardware_version = MCDE_CHIP_VERSION_3_0_8;
+ dev_info(&mcde_dev->dev, "V2 HW\n");
+ } else if (major_version == 3 && minor_version == 0 &&
+ development_version >= 5) {
+ hardware_version = MCDE_CHIP_VERSION_3_0_5;
+ dev_info(&mcde_dev->dev, "V1 HW\n");
+ } else if (major_version == 1 && minor_version == 0 &&
+ development_version >= 4) {
+ hardware_version = MCDE_CHIP_VERSION_1_0_4;
+ mcde_dynamic_power_management = false;
+ num_channels = 2;
+ num_overlays = 3;
+ dev_info(&mcde_dev->dev, "V1_U5500 HW\n");
+ } else if (major_version == 4 && minor_version == 0 &&
+ development_version >= 4) {
+ hardware_version = MCDE_CHIP_VERSION_4_0_4;
+ dev_info(&mcde_dev->dev, "V2_U5500 HW\n");
+ } else {
+ dev_err(&mcde_dev->dev, "Unsupported HW version\n");
+ }
+
+ /* Init MCDE */
+ for (i = 0; i < num_overlays; i++)
+ overlays[i].idx = i;
+
+ if (hardware_version == MCDE_CHIP_VERSION_1_0_4 ||
+ hardware_version == MCDE_CHIP_VERSION_4_0_4) {
+ channels[0].ovly0 = &overlays[0];
+ channels[0].ovly1 = &overlays[1];
+ channels[1].ovly0 = &overlays[2];
+ channels[1].ovly1 = NULL;
+ } else {
+ channels[0].ovly0 = &overlays[0];
+ channels[0].ovly1 = &overlays[1];
+ channels[1].ovly0 = &overlays[2];
+ channels[1].ovly1 = &overlays[3];
+ channels[2].ovly0 = &overlays[4];
+ channels[2].ovly1 = NULL;
+ channels[3].ovly0 = &overlays[5];
+ channels[3].ovly1 = NULL;
+ }
+
+ mcde_debugfs_create(&mcde_dev->dev);
+ for (i = 0; i < num_channels; i++) {
+ channels[i].id = i;
+
+ channels[i].ovly0->chnl = &channels[i];
+ if (channels[i].ovly1)
+ channels[i].ovly1->chnl = &channels[i];
+
+ init_waitqueue_head(&channels[i].state_waitq);
+ init_waitqueue_head(&channels[i].vcmp_waitq);
+ init_timer(&channels[i].dsi_te_timer);
+ channels[i].dsi_te_timer.function =
+ dsi_te_timer_function;
+ channels[i].dsi_te_timer.data = i;
+
+ mcde_debugfs_channel_create(i, &channels[i]);
+ mcde_debugfs_overlay_create(i, 0);
+ if (channels[i].ovly1)
+ mcde_debugfs_overlay_create(i, 1);
+ }
+}
+
+static int __devinit mcde_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+ struct resource *res;
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ num_dsilinks = pdata->num_dsilinks;
+ mcde_dev = pdev;
+
+ num_channels = pdata->num_channels;
+ num_overlays = pdata->num_overlays;
+
+ channels = kzalloc(num_channels * sizeof(struct mcde_chnl_state),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto failed_channels_alloc;
+ }
+
+ overlays = kzalloc(num_overlays * sizeof(struct mcde_ovly_state),
+ GFP_KERNEL);
+ if (!overlays) {
+ ret = -ENOMEM;
+ goto failed_overlays_alloc;
+ }
+
+ dsiio = kzalloc(num_dsilinks * sizeof(*dsiio), GFP_KERNEL);
+ if (!dsiio) {
+ ret = -ENOMEM;
+ goto failed_dsi_alloc;
+ }
+
+ /* Hook up irq */
+ mcde_irq = platform_get_irq(pdev, 0);
+ if (mcde_irq <= 0) {
+ dev_dbg(&pdev->dev, "No irq defined\n");
+ ret = -EINVAL;
+ goto failed_irq_get;
+ }
+
+ /* Map I/O */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(&pdev->dev, "No MCDE io defined\n");
+ ret = -EINVAL;
+ goto failed_get_mcde_io;
+ }
+ mcdeio = ioremap(res->start, res->end - res->start + 1);
+ if (!mcdeio) {
+ dev_dbg(&pdev->dev, "MCDE iomap failed\n");
+ ret = -EINVAL;
+ goto failed_map_mcde_io;
+ }
+ dev_info(&pdev->dev, "MCDE iomap: 0x%.8X->0x%.8X\n",
+ (u32)res->start, (u32)mcdeio);
+ for (i = 0; i < num_dsilinks; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1+i);
+ if (!res) {
+ dev_dbg(&pdev->dev, "No DSI%d io defined\n", i);
+ ret = -EINVAL;
+ goto failed_get_dsi_io;
+ }
+ dsiio[i] = ioremap(res->start, res->end - res->start + 1);
+ if (!dsiio[i]) {
+ dev_dbg(&pdev->dev, "MCDE DSI%d iomap failed\n", i);
+ ret = -EINVAL;
+ goto failed_map_dsi_io;
+ }
+ dev_info(&pdev->dev, "MCDE DSI%d iomap: 0x%.8X->0x%.8X\n",
+ i, (u32)res->start, (u32)dsiio[i]);
+ }
+
+ ret = init_clocks_and_power(pdev);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "%s: init_clocks_and_power failed\n"
+ , __func__);
+ goto failed_init_clocks;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&hw_timeout_work, work_sleep_function);
+
+ probe_hw();
+
+ ret = enable_mcde_hw();
+ if (ret)
+ goto failed_mcde_enable;
+
+ return 0;
+failed_mcde_enable:
+failed_init_clocks:
+failed_map_dsi_io:
+failed_get_dsi_io:
+ for (i = 0; i < num_dsilinks; i++) {
+ if (dsiio[i])
+ iounmap(dsiio[i]);
+ }
+ iounmap(mcdeio);
+failed_map_mcde_io:
+failed_get_mcde_io:
+failed_irq_get:
+ kfree(dsiio);
+ dsiio = NULL;
+failed_dsi_alloc:
+ kfree(overlays);
+ overlays = NULL;
+failed_overlays_alloc:
+ kfree(channels);
+ channels = NULL;
+failed_channels_alloc:
+ num_channels = 0;
+ num_overlays = 0;
+ return ret;
+}
+
+static int __devexit mcde_remove(struct platform_device *pdev)
+{
+ struct mcde_chnl_state *chnl = &channels[0];
+
+ for (; chnl < &channels[num_channels]; chnl++) {
+ if (del_timer(&chnl->dsi_te_timer))
+ dev_vdbg(&mcde_dev->dev,
+ "%s: stopped pending dsi te timer\n",
+ __func__);
+ }
+
+ remove_clocks_and_power(pdev);
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int mcde_resume(struct platform_device *pdev)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+
+static int mcde_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+
+ cancel_delayed_work(&hw_timeout_work);
+
+ if (!mcde_is_enabled) {
+ mcde_unlock(__func__, __LINE__);
+ return 0;
+ }
+ disable_mcde_hw(true);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver mcde_driver = {
+ .probe = mcde_probe,
+ .remove = mcde_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = mcde_suspend,
+ .resume = mcde_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde",
+ },
+};
+
+int __init mcde_init(void)
+{
+ mutex_init(&mcde_hw_lock);
+ return platform_driver_register(&mcde_driver);
+}
+
+void mcde_exit(void)
+{
+ /* REVIEW: shutdown MCDE? */
+ platform_driver_unregister(&mcde_driver);
+}
diff --git a/drivers/video/mcde/mcde_mod.c b/drivers/video/mcde/mcde_mod.c
new file mode 100644
index 00000000000..60df0d4965f
--- /dev/null
+++ b/drivers/video/mcde/mcde_mod.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <video/mcde.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+#include <video/mcde_display.h>
+
+/* Module init */
+
+static int __init mcde_subsystem_init(void)
+{
+ int ret;
+ pr_info("MCDE subsystem init begin\n");
+
+ /* MCDE module init sequence */
+ ret = mcde_init();
+ if (ret)
+ goto mcde_failed;
+ ret = mcde_display_init();
+ if (ret)
+ goto mcde_display_failed;
+ ret = mcde_dss_init();
+ if (ret)
+ goto mcde_dss_failed;
+ ret = mcde_fb_init();
+ if (ret)
+ goto mcde_fb_failed;
+ pr_info("MCDE subsystem init done\n");
+
+ goto done;
+mcde_fb_failed:
+ mcde_dss_exit();
+mcde_dss_failed:
+ mcde_display_exit();
+mcde_display_failed:
+ mcde_exit();
+mcde_failed:
+done:
+ return ret;
+}
+#ifdef MODULE
+module_init(mcde_subsystem_init);
+#else
+fs_initcall(mcde_subsystem_init);
+#endif
+
+static void __exit mcde_module_exit(void)
+{
+ mcde_exit();
+ mcde_display_exit();
+ mcde_dss_exit();
+}
+module_exit(mcde_module_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE driver");
+
diff --git a/drivers/video/mcde/mcde_regs.h b/drivers/video/mcde/mcde_regs.h
new file mode 100644
index 00000000000..7f28c26360b
--- /dev/null
+++ b/drivers/video/mcde/mcde_regs.h
@@ -0,0 +1,5315 @@
+
+#define MCDE_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define MCDE_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
+#define MCDE_CR 0x00000000
+#define MCDE_CR_DSICMD2_EN_V1_SHIFT 0
+#define MCDE_CR_DSICMD2_EN_V1_MASK 0x00000001
+#define MCDE_CR_DSICMD2_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSICMD2_EN_V1, __x)
+#define MCDE_CR_DSICMD1_EN_V1_SHIFT 1
+#define MCDE_CR_DSICMD1_EN_V1_MASK 0x00000002
+#define MCDE_CR_DSICMD1_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSICMD1_EN_V1, __x)
+#define MCDE_CR_DSI0_EN_V3_SHIFT 0
+#define MCDE_CR_DSI0_EN_V3_MASK 0x00000001
+#define MCDE_CR_DSI0_EN_V3(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSI0_EN_V3, __x)
+#define MCDE_CR_DSI1_EN_V3_SHIFT 1
+#define MCDE_CR_DSI1_EN_V3_MASK 0x00000002
+#define MCDE_CR_DSI1_EN_V3(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSI1_EN_V3, __x)
+#define MCDE_CR_DSICMD0_EN_V1_SHIFT 2
+#define MCDE_CR_DSICMD0_EN_V1_MASK 0x00000004
+#define MCDE_CR_DSICMD0_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSICMD0_EN_V1, __x)
+#define MCDE_CR_DSIVID2_EN_V1_SHIFT 3
+#define MCDE_CR_DSIVID2_EN_V1_MASK 0x00000008
+#define MCDE_CR_DSIVID2_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSIVID2_EN_V1, __x)
+#define MCDE_CR_DSIVID1_EN_V1_SHIFT 4
+#define MCDE_CR_DSIVID1_EN_V1_MASK 0x00000010
+#define MCDE_CR_DSIVID1_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSIVID1_EN_V1, __x)
+#define MCDE_CR_DSIVID0_EN_V1_SHIFT 5
+#define MCDE_CR_DSIVID0_EN_V1_MASK 0x00000020
+#define MCDE_CR_DSIVID0_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DSIVID0_EN_V1, __x)
+#define MCDE_CR_DBIC1_EN_V1_SHIFT 6
+#define MCDE_CR_DBIC1_EN_V1_MASK 0x00000040
+#define MCDE_CR_DBIC1_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DBIC1_EN_V1, __x)
+#define MCDE_CR_DBIC0_EN_V1_SHIFT 7
+#define MCDE_CR_DBIC0_EN_V1_MASK 0x00000080
+#define MCDE_CR_DBIC0_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DBIC0_EN_V1, __x)
+#define MCDE_CR_DBI_EN_V3_SHIFT 7
+#define MCDE_CR_DBI_EN_V3_MASK 0x00000080
+#define MCDE_CR_DBI_EN_V3(__x) \
+ MCDE_VAL2REG(MCDE_CR, DBI_EN_V3, __x)
+#define MCDE_CR_DPIB_EN_V1_SHIFT 8
+#define MCDE_CR_DPIB_EN_V1_MASK 0x00000100
+#define MCDE_CR_DPIB_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DPIB_EN_V1, __x)
+#define MCDE_CR_DPIA_EN_V1_SHIFT 9
+#define MCDE_CR_DPIA_EN_V1_MASK 0x00000200
+#define MCDE_CR_DPIA_EN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, DPIA_EN_V1, __x)
+#define MCDE_CR_DPI_EN_V3_SHIFT 9
+#define MCDE_CR_DPI_EN_V3_MASK 0x00000200
+#define MCDE_CR_DPI_EN_V3(__x) \
+ MCDE_VAL2REG(MCDE_CR, DPI_EN_V3, __x)
+#define MCDE_CR_IFIFOCTRLEN_SHIFT 15
+#define MCDE_CR_IFIFOCTRLEN_MASK 0x00008000
+#define MCDE_CR_IFIFOCTRLEN(__x) \
+ MCDE_VAL2REG(MCDE_CR, IFIFOCTRLEN, __x)
+#define MCDE_CR_F01MUX_V1_SHIFT 16
+#define MCDE_CR_F01MUX_V1_MASK 0x00010000
+#define MCDE_CR_F01MUX_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, F01MUX_V1, __x)
+#define MCDE_CR_FABMUX_V1_SHIFT 17
+#define MCDE_CR_FABMUX_V1_MASK 0x00020000
+#define MCDE_CR_FABMUX_V1(__x) \
+ MCDE_VAL2REG(MCDE_CR, FABMUX_V1, __x)
+#define MCDE_CR_AUTOCLKG_EN_SHIFT 30
+#define MCDE_CR_AUTOCLKG_EN_MASK 0x40000000
+#define MCDE_CR_AUTOCLKG_EN(__x) \
+ MCDE_VAL2REG(MCDE_CR, AUTOCLKG_EN, __x)
+#define MCDE_CR_MCDEEN_SHIFT 31
+#define MCDE_CR_MCDEEN_MASK 0x80000000
+#define MCDE_CR_MCDEEN(__x) \
+ MCDE_VAL2REG(MCDE_CR, MCDEEN, __x)
+#define MCDE_CONF0 0x00000004
+#define MCDE_CONF0_SYNCMUX0_SHIFT 0
+#define MCDE_CONF0_SYNCMUX0_MASK 0x00000001
+#define MCDE_CONF0_SYNCMUX0(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX0, __x)
+#define MCDE_CONF0_SYNCMUX1_SHIFT 1
+#define MCDE_CONF0_SYNCMUX1_MASK 0x00000002
+#define MCDE_CONF0_SYNCMUX1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX1, __x)
+#define MCDE_CONF0_SYNCMUX2_SHIFT 2
+#define MCDE_CONF0_SYNCMUX2_MASK 0x00000004
+#define MCDE_CONF0_SYNCMUX2(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX2, __x)
+#define MCDE_CONF0_SYNCMUX3_SHIFT 3
+#define MCDE_CONF0_SYNCMUX3_MASK 0x00000008
+#define MCDE_CONF0_SYNCMUX3(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX3, __x)
+#define MCDE_CONF0_SYNCMUX4_SHIFT 4
+#define MCDE_CONF0_SYNCMUX4_MASK 0x00000010
+#define MCDE_CONF0_SYNCMUX4(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX4, __x)
+#define MCDE_CONF0_SYNCMUX5_SHIFT 5
+#define MCDE_CONF0_SYNCMUX5_MASK 0x00000020
+#define MCDE_CONF0_SYNCMUX5(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX5, __x)
+#define MCDE_CONF0_SYNCMUX6_SHIFT 6
+#define MCDE_CONF0_SYNCMUX6_MASK 0x00000040
+#define MCDE_CONF0_SYNCMUX6(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX6, __x)
+#define MCDE_CONF0_SYNCMUX7_SHIFT 7
+#define MCDE_CONF0_SYNCMUX7_MASK 0x00000080
+#define MCDE_CONF0_SYNCMUX7(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX7, __x)
+#define MCDE_CONF0_SWAP_A_C0_V1_SHIFT 8
+#define MCDE_CONF0_SWAP_A_C0_V1_MASK 0x00000100
+#define MCDE_CONF0_SWAP_A_C0_V1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SWAP_A_C0_V1, __x)
+#define MCDE_CONF0_SWAP_B_C1_V1_SHIFT 9
+#define MCDE_CONF0_SWAP_B_C1_V1_MASK 0x00000200
+#define MCDE_CONF0_SWAP_B_C1_V1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SWAP_B_C1_V1, __x)
+#define MCDE_CONF0_FSYNCTRLA_V1_SHIFT 10
+#define MCDE_CONF0_FSYNCTRLA_V1_MASK 0x00000400
+#define MCDE_CONF0_FSYNCTRLA_V1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, FSYNCTRLA_V1, __x)
+#define MCDE_CONF0_FSYNCTRLB_V1_SHIFT 11
+#define MCDE_CONF0_FSYNCTRLB_V1_MASK 0x00000800
+#define MCDE_CONF0_FSYNCTRLB_V1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, FSYNCTRLB_V1, __x)
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, IFIFOCTRLWTRMRKLVL, __x)
+#define MCDE_CONF0_OUTMUX0_SHIFT 16
+#define MCDE_CONF0_OUTMUX0_MASK 0x00070000
+#define MCDE_CONF0_OUTMUX0(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX0, __x)
+#define MCDE_CONF0_OUTMUX1_SHIFT 19
+#define MCDE_CONF0_OUTMUX1_MASK 0x00380000
+#define MCDE_CONF0_OUTMUX1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX1, __x)
+#define MCDE_CONF0_OUTMUX2_SHIFT 22
+#define MCDE_CONF0_OUTMUX2_MASK 0x01C00000
+#define MCDE_CONF0_OUTMUX2(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX2, __x)
+#define MCDE_CONF0_OUTMUX3_SHIFT 25
+#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000
+#define MCDE_CONF0_OUTMUX3(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX3, __x)
+#define MCDE_CONF0_OUTMUX4_SHIFT 28
+#define MCDE_CONF0_OUTMUX4_MASK 0x70000000
+#define MCDE_CONF0_OUTMUX4(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX4, __x)
+#define MCDE_SSP 0x00000008
+#define MCDE_SSP_SSPDATA_SHIFT 0
+#define MCDE_SSP_SSPDATA_MASK 0x000000FF
+#define MCDE_SSP_SSPDATA(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPDATA, __x)
+#define MCDE_SSP_SSPCMD_SHIFT 8
+#define MCDE_SSP_SSPCMD_MASK 0x00000100
+#define MCDE_SSP_SSPCMD_DATA 0
+#define MCDE_SSP_SSPCMD_COMMAND 1
+#define MCDE_SSP_SSPCMD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPCMD, MCDE_SSP_SSPCMD_##__x)
+#define MCDE_SSP_SSPCMD(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPCMD, __x)
+#define MCDE_SSP_SSPEN_SHIFT 16
+#define MCDE_SSP_SSPEN_MASK 0x00010000
+#define MCDE_SSP_SSPEN(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPEN, __x)
+#define MCDE_AIS 0x00000100
+#define MCDE_AIS_MCDEPPI_SHIFT 0
+#define MCDE_AIS_MCDEPPI_MASK 0x00000001
+#define MCDE_AIS_MCDEPPI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEPPI, __x)
+#define MCDE_AIS_MCDEOVLI_SHIFT 1
+#define MCDE_AIS_MCDEOVLI_MASK 0x00000002
+#define MCDE_AIS_MCDEOVLI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEOVLI, __x)
+#define MCDE_AIS_MCDECHNLI_SHIFT 2
+#define MCDE_AIS_MCDECHNLI_MASK 0x00000004
+#define MCDE_AIS_MCDECHNLI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDECHNLI, __x)
+#define MCDE_AIS_MCDEERRI_SHIFT 3
+#define MCDE_AIS_MCDEERRI_MASK 0x00000008
+#define MCDE_AIS_MCDEERRI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEERRI, __x)
+#define MCDE_AIS_DSI0AI_SHIFT 4
+#define MCDE_AIS_DSI0AI_MASK 0x00000010
+#define MCDE_AIS_DSI0AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI0AI, __x)
+#define MCDE_AIS_DSI1AI_SHIFT 5
+#define MCDE_AIS_DSI1AI_MASK 0x00000020
+#define MCDE_AIS_DSI1AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI1AI, __x)
+#define MCDE_AIS_DSI2AI_SHIFT 6
+#define MCDE_AIS_DSI2AI_MASK 0x00000040
+#define MCDE_AIS_DSI2AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI2AI, __x)
+#define MCDE_IMSCPP 0x00000104
+#define MCDE_IMSCPP_VCMPAIM_SHIFT 0
+#define MCDE_IMSCPP_VCMPAIM_MASK 0x00000001
+#define MCDE_IMSCPP_VCMPAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPAIM, __x)
+#define MCDE_IMSCPP_VCMPBIM_SHIFT 1
+#define MCDE_IMSCPP_VCMPBIM_MASK 0x00000002
+#define MCDE_IMSCPP_VCMPBIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPBIM, __x)
+#define MCDE_IMSCPP_VSCC0IM_SHIFT 2
+#define MCDE_IMSCPP_VSCC0IM_MASK 0x00000004
+#define MCDE_IMSCPP_VSCC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VSCC0IM, __x)
+#define MCDE_IMSCPP_VSCC1IM_SHIFT 3
+#define MCDE_IMSCPP_VSCC1IM_MASK 0x00000008
+#define MCDE_IMSCPP_VSCC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VSCC1IM, __x)
+#define MCDE_IMSCPP_VCMPC0IM_SHIFT 4
+#define MCDE_IMSCPP_VCMPC0IM_MASK 0x00000010
+#define MCDE_IMSCPP_VCMPC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPC0IM, __x)
+#define MCDE_IMSCPP_VCMPC1IM_SHIFT 5
+#define MCDE_IMSCPP_VCMPC1IM_MASK 0x00000020
+#define MCDE_IMSCPP_VCMPC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPC1IM, __x)
+#define MCDE_IMSCPP_ROTFDIM_B_SHIFT 6
+#define MCDE_IMSCPP_ROTFDIM_B_MASK 0x00000040
+#define MCDE_IMSCPP_ROTFDIM_B(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_B, __x)
+#define MCDE_IMSCPP_ROTFDIM_A_SHIFT 7
+#define MCDE_IMSCPP_ROTFDIM_A_MASK 0x00000080
+#define MCDE_IMSCPP_ROTFDIM_A(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_A, __x)
+#define MCDE_IMSCOVL 0x00000108
+#define MCDE_IMSCOVL_OVLRDIM_SHIFT 0
+#define MCDE_IMSCOVL_OVLRDIM_MASK 0x0000FFFF
+#define MCDE_IMSCOVL_OVLRDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCOVL, OVLRDIM, __x)
+#define MCDE_IMSCOVL_OVLFDIM_SHIFT 16
+#define MCDE_IMSCOVL_OVLFDIM_MASK 0xFFFF0000
+#define MCDE_IMSCOVL_OVLFDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCOVL, OVLFDIM, __x)
+#define MCDE_IMSCCHNL 0x0000010C
+#define MCDE_IMSCCHNL_CHNLRDIM_SHIFT 0
+#define MCDE_IMSCCHNL_CHNLRDIM_MASK 0x0000FFFF
+#define MCDE_IMSCCHNL_CHNLRDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLRDIM, __x)
+#define MCDE_IMSCCHNL_CHNLAIM_SHIFT 16
+#define MCDE_IMSCCHNL_CHNLAIM_MASK 0xFFFF0000
+#define MCDE_IMSCCHNL_CHNLAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLAIM, __x)
+#define MCDE_IMSCERR 0x00000110
+#define MCDE_IMSCERR_FUAIM_SHIFT 0
+#define MCDE_IMSCERR_FUAIM_MASK 0x00000001
+#define MCDE_IMSCERR_FUAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUAIM, __x)
+#define MCDE_IMSCERR_FUBIM_SHIFT 1
+#define MCDE_IMSCERR_FUBIM_MASK 0x00000002
+#define MCDE_IMSCERR_FUBIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUBIM, __x)
+#define MCDE_IMSCERR_SCHBLCKDIM_SHIFT 2
+#define MCDE_IMSCERR_SCHBLCKDIM_MASK 0x00000004
+#define MCDE_IMSCERR_SCHBLCKDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, SCHBLCKDIM, __x)
+#define MCDE_IMSCERR_ROTAFEIM_WRITE_SHIFT 3
+#define MCDE_IMSCERR_ROTAFEIM_WRITE_MASK 0x00000008
+#define MCDE_IMSCERR_ROTAFEIM_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_WRITE, __x)
+#define MCDE_IMSCERR_ROTAFEIM_READ_SHIFT 4
+#define MCDE_IMSCERR_ROTAFEIM_READ_MASK 0x00000010
+#define MCDE_IMSCERR_ROTAFEIM_READ(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_READ, __x)
+#define MCDE_IMSCERR_ROTBFEIM_WRITE_SHIFT 5
+#define MCDE_IMSCERR_ROTBFEIM_WRITE_MASK 0x00000020
+#define MCDE_IMSCERR_ROTBFEIM_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_WRITE, __x)
+#define MCDE_IMSCERR_ROTBFEIM_READ_SHIFT 6
+#define MCDE_IMSCERR_ROTBFEIM_READ_MASK 0x00000040
+#define MCDE_IMSCERR_ROTBFEIM_READ(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_READ, __x)
+#define MCDE_IMSCERR_FUC0IM_SHIFT 7
+#define MCDE_IMSCERR_FUC0IM_MASK 0x00000080
+#define MCDE_IMSCERR_FUC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUC0IM, __x)
+#define MCDE_IMSCERR_FUC1IM_SHIFT 8
+#define MCDE_IMSCERR_FUC1IM_MASK 0x00000100
+#define MCDE_IMSCERR_FUC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUC1IM, __x)
+#define MCDE_IMSCERR_OVLFERRIM_SHIFT 16
+#define MCDE_IMSCERR_OVLFERRIM_MASK 0xFFFF0000
+#define MCDE_IMSCERR_OVLFERRIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, OVLFERRIM, __x)
+#define MCDE_RISPP 0x00000114
+#define MCDE_RISPP_VCMPARIS_SHIFT 0
+#define MCDE_RISPP_VCMPARIS_MASK 0x00000001
+#define MCDE_RISPP_VCMPARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPARIS, __x)
+#define MCDE_RISPP_VCMPBRIS_SHIFT 1
+#define MCDE_RISPP_VCMPBRIS_MASK 0x00000002
+#define MCDE_RISPP_VCMPBRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPBRIS, __x)
+#define MCDE_RISPP_VSCC0RIS_SHIFT 2
+#define MCDE_RISPP_VSCC0RIS_MASK 0x00000004
+#define MCDE_RISPP_VSCC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VSCC0RIS, __x)
+#define MCDE_RISPP_VSCC1RIS_SHIFT 3
+#define MCDE_RISPP_VSCC1RIS_MASK 0x00000008
+#define MCDE_RISPP_VSCC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VSCC1RIS, __x)
+#define MCDE_RISPP_VCMPC0RIS_SHIFT 4
+#define MCDE_RISPP_VCMPC0RIS_MASK 0x00000010
+#define MCDE_RISPP_VCMPC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPC0RIS, __x)
+#define MCDE_RISPP_VCMPC1RIS_SHIFT 5
+#define MCDE_RISPP_VCMPC1RIS_MASK 0x00000020
+#define MCDE_RISPP_VCMPC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPC1RIS, __x)
+#define MCDE_RISPP_ROTFDRIS_B_SHIFT 6
+#define MCDE_RISPP_ROTFDRIS_B_MASK 0x00000040
+#define MCDE_RISPP_ROTFDRIS_B(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_B, __x)
+#define MCDE_RISPP_ROTFDRIS_A_SHIFT 7
+#define MCDE_RISPP_ROTFDRIS_A_MASK 0x00000080
+#define MCDE_RISPP_ROTFDRIS_A(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_A, __x)
+#define MCDE_RISOVL 0x00000118
+#define MCDE_RISOVL_OVLRDRIS_SHIFT 0
+#define MCDE_RISOVL_OVLRDRIS_MASK 0x0000FFFF
+#define MCDE_RISOVL_OVLRDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISOVL, OVLRDRIS, __x)
+#define MCDE_RISOVL_OVLFDRIS_SHIFT 16
+#define MCDE_RISOVL_OVLFDRIS_MASK 0xFFFF0000
+#define MCDE_RISOVL_OVLFDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISOVL, OVLFDRIS, __x)
+#define MCDE_RISCHNL 0x0000011C
+#define MCDE_RISCHNL_CHNLRDRIS_SHIFT 0
+#define MCDE_RISCHNL_CHNLRDRIS_MASK 0x0000FFFF
+#define MCDE_RISCHNL_CHNLRDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISCHNL, CHNLRDRIS, __x)
+#define MCDE_RISCHNL_CHNLARIS_SHIFT 16
+#define MCDE_RISCHNL_CHNLARIS_MASK 0xFFFF0000
+#define MCDE_RISCHNL_CHNLARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISCHNL, CHNLARIS, __x)
+#define MCDE_RISERR 0x00000120
+#define MCDE_RISERR_FUARIS_SHIFT 0
+#define MCDE_RISERR_FUARIS_MASK 0x00000001
+#define MCDE_RISERR_FUARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUARIS, __x)
+#define MCDE_RISERR_FUBRIS_SHIFT 1
+#define MCDE_RISERR_FUBRIS_MASK 0x00000002
+#define MCDE_RISERR_FUBRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUBRIS, __x)
+#define MCDE_RISERR_SCHBLCKDRIS_SHIFT 2
+#define MCDE_RISERR_SCHBLCKDRIS_MASK 0x00000004
+#define MCDE_RISERR_SCHBLCKDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, SCHBLCKDRIS, __x)
+#define MCDE_RISERR_ROTAFERIS_WRITE_SHIFT 3
+#define MCDE_RISERR_ROTAFERIS_WRITE_MASK 0x00000008
+#define MCDE_RISERR_ROTAFERIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_WRITE, __x)
+#define MCDE_RISERR_ROTAFERIS_READ_SHIFT 4
+#define MCDE_RISERR_ROTAFERIS_READ_MASK 0x00000010
+#define MCDE_RISERR_ROTAFERIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_READ, __x)
+#define MCDE_RISERR_ROTBFERIS_WRITE_SHIFT 5
+#define MCDE_RISERR_ROTBFERIS_WRITE_MASK 0x00000020
+#define MCDE_RISERR_ROTBFERIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_WRITE, __x)
+#define MCDE_RISERR_ROTBFERIS_READ_SHIFT 6
+#define MCDE_RISERR_ROTBFERIS_READ_MASK 0x00000040
+#define MCDE_RISERR_ROTBFERIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_READ, __x)
+#define MCDE_RISERR_FUC0RIS_SHIFT 7
+#define MCDE_RISERR_FUC0RIS_MASK 0x00000080
+#define MCDE_RISERR_FUC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUC0RIS, __x)
+#define MCDE_RISERR_FUC1RIS_SHIFT 8
+#define MCDE_RISERR_FUC1RIS_MASK 0x00000100
+#define MCDE_RISERR_FUC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUC1RIS, __x)
+#define MCDE_RISERR_OVLFERRRIS_SHIFT 16
+#define MCDE_RISERR_OVLFERRRIS_MASK 0xFFFF0000
+#define MCDE_RISERR_OVLFERRRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, OVLFERRRIS, __x)
+#define MCDE_MISPP 0x00000124
+#define MCDE_MISPP_VCMPAMIS_SHIFT 0
+#define MCDE_MISPP_VCMPAMIS_MASK 0x00000001
+#define MCDE_MISPP_VCMPAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPAMIS, __x)
+#define MCDE_MISPP_VCMPBMIS_SHIFT 1
+#define MCDE_MISPP_VCMPBMIS_MASK 0x00000002
+#define MCDE_MISPP_VCMPBMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPBMIS, __x)
+#define MCDE_MISPP_VSCC0MIS_SHIFT 2
+#define MCDE_MISPP_VSCC0MIS_MASK 0x00000004
+#define MCDE_MISPP_VSCC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VSCC0MIS, __x)
+#define MCDE_MISPP_VSCC1MIS_SHIFT 3
+#define MCDE_MISPP_VSCC1MIS_MASK 0x00000008
+#define MCDE_MISPP_VSCC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VSCC1MIS, __x)
+#define MCDE_MISPP_VCMPC0MIS_SHIFT 4
+#define MCDE_MISPP_VCMPC0MIS_MASK 0x00000010
+#define MCDE_MISPP_VCMPC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPC0MIS, __x)
+#define MCDE_MISPP_VCMPC1MIS_SHIFT 5
+#define MCDE_MISPP_VCMPC1MIS_MASK 0x00000020
+#define MCDE_MISPP_VCMPC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPC1MIS, __x)
+#define MCDE_MISPP_ROTFDMIS_A_SHIFT 6
+#define MCDE_MISPP_ROTFDMIS_A_MASK 0x00000040
+#define MCDE_MISPP_ROTFDMIS_A(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_A, __x)
+#define MCDE_MISPP_ROTFDMIS_B_SHIFT 7
+#define MCDE_MISPP_ROTFDMIS_B_MASK 0x00000080
+#define MCDE_MISPP_ROTFDMIS_B(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_B, __x)
+#define MCDE_MISOVL 0x00000128
+#define MCDE_MISOVL_OVLRDMIS_SHIFT 0
+#define MCDE_MISOVL_OVLRDMIS_MASK 0x0000FFFF
+#define MCDE_MISOVL_OVLRDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISOVL, OVLRDMIS, __x)
+#define MCDE_MISOVL_OVLFDMIS_SHIFT 16
+#define MCDE_MISOVL_OVLFDMIS_MASK 0xFFFF0000
+#define MCDE_MISOVL_OVLFDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISOVL, OVLFDMIS, __x)
+#define MCDE_MISCHNL 0x0000012C
+#define MCDE_MISCHNL_CHNLRDMIS_SHIFT 0
+#define MCDE_MISCHNL_CHNLRDMIS_MASK 0x0000FFFF
+#define MCDE_MISCHNL_CHNLRDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISCHNL, CHNLRDMIS, __x)
+#define MCDE_MISCHNL_CHNLAMIS_SHIFT 16
+#define MCDE_MISCHNL_CHNLAMIS_MASK 0xFFFF0000
+#define MCDE_MISCHNL_CHNLAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISCHNL, CHNLAMIS, __x)
+#define MCDE_MISERR 0x00000130
+#define MCDE_MISERR_FUAMIS_SHIFT 0
+#define MCDE_MISERR_FUAMIS_MASK 0x00000001
+#define MCDE_MISERR_FUAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUAMIS, __x)
+#define MCDE_MISERR_FUBMIS_SHIFT 1
+#define MCDE_MISERR_FUBMIS_MASK 0x00000002
+#define MCDE_MISERR_FUBMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUBMIS, __x)
+#define MCDE_MISERR_SCHBLCKDMIS_SHIFT 2
+#define MCDE_MISERR_SCHBLCKDMIS_MASK 0x00000004
+#define MCDE_MISERR_SCHBLCKDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, SCHBLCKDMIS, __x)
+#define MCDE_MISERR_ROTAFEMIS_WRITE_SHIFT 3
+#define MCDE_MISERR_ROTAFEMIS_WRITE_MASK 0x00000008
+#define MCDE_MISERR_ROTAFEMIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_WRITE, __x)
+#define MCDE_MISERR_ROTAFEMIS_READ_SHIFT 4
+#define MCDE_MISERR_ROTAFEMIS_READ_MASK 0x00000010
+#define MCDE_MISERR_ROTAFEMIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_READ, __x)
+#define MCDE_MISERR_ROTBFEMIS_WRITE_SHIFT 5
+#define MCDE_MISERR_ROTBFEMIS_WRITE_MASK 0x00000020
+#define MCDE_MISERR_ROTBFEMIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_WRITE, __x)
+#define MCDE_MISERR_ROTBFEMIS_READ_SHIFT 6
+#define MCDE_MISERR_ROTBFEMIS_READ_MASK 0x00000040
+#define MCDE_MISERR_ROTBFEMIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_READ, __x)
+#define MCDE_MISERR_FUC0MIS_SHIFT 7
+#define MCDE_MISERR_FUC0MIS_MASK 0x00000080
+#define MCDE_MISERR_FUC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUC0MIS, __x)
+#define MCDE_MISERR_FUC1MIS_SHIFT 8
+#define MCDE_MISERR_FUC1MIS_MASK 0x00000100
+#define MCDE_MISERR_FUC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUC1MIS, __x)
+#define MCDE_MISERR_OVLFERMIS_SHIFT 16
+#define MCDE_MISERR_OVLFERMIS_MASK 0xFFFF0000
+#define MCDE_MISERR_OVLFERMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, OVLFERMIS, __x)
+#define MCDE_SISPP 0x00000134
+#define MCDE_SISPP_VCMPASIS_SHIFT 0
+#define MCDE_SISPP_VCMPASIS_MASK 0x00000001
+#define MCDE_SISPP_VCMPASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPASIS, __x)
+#define MCDE_SISPP_VCMPBSIS_SHIFT 1
+#define MCDE_SISPP_VCMPBSIS_MASK 0x00000002
+#define MCDE_SISPP_VCMPBSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPBSIS, __x)
+#define MCDE_SISPP_VSCC0SIS_SHIFT 2
+#define MCDE_SISPP_VSCC0SIS_MASK 0x00000004
+#define MCDE_SISPP_VSCC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VSCC0SIS, __x)
+#define MCDE_SISPP_VSCC1SIS_SHIFT 3
+#define MCDE_SISPP_VSCC1SIS_MASK 0x00000008
+#define MCDE_SISPP_VSCC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VSCC1SIS, __x)
+#define MCDE_SISPP_VCMPC0SIS_SHIFT 4
+#define MCDE_SISPP_VCMPC0SIS_MASK 0x00000010
+#define MCDE_SISPP_VCMPC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPC0SIS, __x)
+#define MCDE_SISPP_VCMPC1SIS_SHIFT 5
+#define MCDE_SISPP_VCMPC1SIS_MASK 0x00000020
+#define MCDE_SISPP_VCMPC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPC1SIS, __x)
+#define MCDE_SISPP_ROTFDSIS_A_SHIFT 6
+#define MCDE_SISPP_ROTFDSIS_A_MASK 0x00000040
+#define MCDE_SISPP_ROTFDSIS_A(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_A, __x)
+#define MCDE_SISPP_ROTFDSIS_B_SHIFT 7
+#define MCDE_SISPP_ROTFDSIS_B_MASK 0x00000080
+#define MCDE_SISPP_ROTFDSIS_B(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_B, __x)
+#define MCDE_SISOVL 0x00000138
+#define MCDE_SISOVL_OVLRDSIS_SHIFT 0
+#define MCDE_SISOVL_OVLRDSIS_MASK 0x0000FFFF
+#define MCDE_SISOVL_OVLRDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISOVL, OVLRDSIS, __x)
+#define MCDE_SISOVL_OVLFDSIS_SHIFT 16
+#define MCDE_SISOVL_OVLFDSIS_MASK 0xFFFF0000
+#define MCDE_SISOVL_OVLFDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISOVL, OVLFDSIS, __x)
+#define MCDE_SISCHNL 0x0000013C
+#define MCDE_SISCHNL_CHNLRDSIS_SHIFT 0
+#define MCDE_SISCHNL_CHNLRDSIS_MASK 0x0000FFFF
+#define MCDE_SISCHNL_CHNLRDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISCHNL, CHNLRDSIS, __x)
+#define MCDE_SISCHNL_CHNLASIS_SHIFT 16
+#define MCDE_SISCHNL_CHNLASIS_MASK 0xFFFF0000
+#define MCDE_SISCHNL_CHNLASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISCHNL, CHNLASIS, __x)
+#define MCDE_SISERR 0x00000140
+#define MCDE_SISERR_FUASIS_SHIFT 0
+#define MCDE_SISERR_FUASIS_MASK 0x00000001
+#define MCDE_SISERR_FUASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUASIS, __x)
+#define MCDE_SISERR_FUBSIS_SHIFT 1
+#define MCDE_SISERR_FUBSIS_MASK 0x00000002
+#define MCDE_SISERR_FUBSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUBSIS, __x)
+#define MCDE_SISERR_SCHBLCKDSIS_SHIFT 2
+#define MCDE_SISERR_SCHBLCKDSIS_MASK 0x00000004
+#define MCDE_SISERR_SCHBLCKDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, SCHBLCKDSIS, __x)
+#define MCDE_SISERR_ROTAFESIS_WRITE_SHIFT 3
+#define MCDE_SISERR_ROTAFESIS_WRITE_MASK 0x00000008
+#define MCDE_SISERR_ROTAFESIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_WRITE, __x)
+#define MCDE_SISERR_ROTAFESIS_READ_SHIFT 4
+#define MCDE_SISERR_ROTAFESIS_READ_MASK 0x00000010
+#define MCDE_SISERR_ROTAFESIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_READ, __x)
+#define MCDE_SISERR_ROTBFESIS_WRITE_SHIFT 5
+#define MCDE_SISERR_ROTBFESIS_WRITE_MASK 0x00000020
+#define MCDE_SISERR_ROTBFESIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_WRITE, __x)
+#define MCDE_SISERR_ROTBFESIS_READ_SHIFT 6
+#define MCDE_SISERR_ROTBFESIS_READ_MASK 0x00000040
+#define MCDE_SISERR_ROTBFESIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_READ, __x)
+#define MCDE_SISERR_FUC0SIS_SHIFT 7
+#define MCDE_SISERR_FUC0SIS_MASK 0x00000080
+#define MCDE_SISERR_FUC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUC0SIS, __x)
+#define MCDE_SISERR_FUC1SIS_SHIFT 8
+#define MCDE_SISERR_FUC1SIS_MASK 0x00000100
+#define MCDE_SISERR_FUC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUC1SIS, __x)
+#define MCDE_SISERR_OVLFERSIS_SHIFT 16
+#define MCDE_SISERR_OVLFERSIS_MASK 0xFFFF0000
+#define MCDE_SISERR_OVLFERSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, OVLFERSIS, __x)
+#define MCDE_PID 0x000001FC
+#define MCDE_PID_METALFIX_VERSION_SHIFT 0
+#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF
+#define MCDE_PID_METALFIX_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, METALFIX_VERSION, __x)
+#define MCDE_PID_DEVELOPMENT_VERSION_SHIFT 8
+#define MCDE_PID_DEVELOPMENT_VERSION_MASK 0x0000FF00
+#define MCDE_PID_DEVELOPMENT_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, DEVELOPMENT_VERSION, __x)
+#define MCDE_PID_MINOR_VERSION_SHIFT 16
+#define MCDE_PID_MINOR_VERSION_MASK 0x00FF0000
+#define MCDE_PID_MINOR_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, MINOR_VERSION, __x)
+#define MCDE_PID_MAJOR_VERSION_SHIFT 24
+#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000
+#define MCDE_PID_MAJOR_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, MAJOR_VERSION, __x)
+#define MCDE_EXTSRC0A0 0x00000200
+#define MCDE_EXTSRC0A0_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC0A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC0A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0A0, BASEADDRESS0, __x)
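Editor's note, not part of the patch: the MCDE_EXTSRC<n> registers repeat at a fixed 0x20 stride, which the GROUPOFFSET define above captures. A hedged sketch of deriving a per-source offset from it follows; the helper name is hypothetical, and the result simply matches the explicit offsets listed below (0x220, 0x240, ... 0x320).

/* Sketch only: byte offset of MCDE_EXTSRC<src>A0, using the group stride. */
static inline u32 mcde_extsrc_a0_offset(unsigned int src)
{
	return MCDE_EXTSRC0A0 + src * MCDE_EXTSRC0A0_GROUPOFFSET;
}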
+#define MCDE_EXTSRC1A0 0x00000220
+#define MCDE_EXTSRC1A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC1A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC1A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC2A0 0x00000240
+#define MCDE_EXTSRC2A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC2A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC2A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC3A0 0x00000260
+#define MCDE_EXTSRC3A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC3A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC3A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC4A0 0x00000280
+#define MCDE_EXTSRC4A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC4A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC4A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC5A0 0x000002A0
+#define MCDE_EXTSRC5A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC5A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC5A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC6A0 0x000002C0
+#define MCDE_EXTSRC6A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC6A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC7A0 0x000002E0
+#define MCDE_EXTSRC7A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC7A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC7A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC8A0 0x00000300
+#define MCDE_EXTSRC8A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC8A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC8A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC9A0 0x00000320
+#define MCDE_EXTSRC9A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC9A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC9A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC0A1 0x00000204
+#define MCDE_EXTSRC0A1_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC0A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC0A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC1A1 0x00000224
+#define MCDE_EXTSRC1A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC1A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC1A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC2A1 0x00000244
+#define MCDE_EXTSRC2A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC2A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC2A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC3A1 0x00000264
+#define MCDE_EXTSRC3A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC3A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC3A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC4A1 0x00000284
+#define MCDE_EXTSRC4A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC4A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC4A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC5A1 0x000002A4
+#define MCDE_EXTSRC5A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC5A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC5A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC6A1 0x000002C4
+#define MCDE_EXTSRC6A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC6A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC7A1 0x000002E4
+#define MCDE_EXTSRC7A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC7A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC7A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC8A1 0x00000304
+#define MCDE_EXTSRC8A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC8A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC8A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC9A1 0x00000324
+#define MCDE_EXTSRC9A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC9A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC9A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC6A2 0x000002C8
+#define MCDE_EXTSRC6A2_BASEADDRESS2_SHIFT 3
+#define MCDE_EXTSRC6A2_BASEADDRESS2_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A2_BASEADDRESS2(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A2, BASEADDRESS2, __x)
+#define MCDE_EXTSRC0CONF 0x0000020C
+#define MCDE_EXTSRC0CONF_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC0CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC0CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_ID, __x)
+#define MCDE_EXTSRC0CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC0CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC0CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_NB, __x)
+#define MCDE_EXTSRC0CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC0CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC0CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC0CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC0CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC0CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC0CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC0CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC0CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC0CONF_BPP_RGB444 4
+#define MCDE_EXTSRC0CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC0CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC0CONF_BPP_RGB565 7
+#define MCDE_EXTSRC0CONF_BPP_RGB888 8
+#define MCDE_EXTSRC0CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC0CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC0CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC0CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, MCDE_EXTSRC0CONF_BPP_##__x)
+#define MCDE_EXTSRC0CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, __x)
+#define MCDE_EXTSRC0CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC0CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC0CONF_BGR_RGB 0
+#define MCDE_EXTSRC0CONF_BGR_BGR 1
+#define MCDE_EXTSRC0CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, MCDE_EXTSRC0CONF_BGR_##__x)
+#define MCDE_EXTSRC0CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, __x)
+#define MCDE_EXTSRC0CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC0CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC0CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC0CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC0CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, MCDE_EXTSRC0CONF_BEBO_##__x)
+#define MCDE_EXTSRC0CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, __x)
+#define MCDE_EXTSRC0CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC0CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC0CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC0CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC0CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, MCDE_EXTSRC0CONF_BEPO_##__x)
+#define MCDE_EXTSRC0CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, __x)
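Editor's note, not part of the patch: a minimal usage sketch of the field macros above, assuming MCDE_VAL2REG() shifts a value into its field and masks it, as its uses throughout this header imply. The helper name, the mcde_base pointer, and the chosen field values are illustrative only.

/* Sketch: compose MCDE_EXTSRC0CONF for a single RGB565 buffer on overlay 0. */
static void mcde_extsrc0_conf_example(void __iomem *mcde_base)
{
	u32 conf = MCDE_EXTSRC0CONF_BPP_ENUM(RGB565)	  /* pixel format */
		 | MCDE_EXTSRC0CONF_BGR_ENUM(RGB)	  /* component order */
		 | MCDE_EXTSRC0CONF_BEBO_ENUM(LITTLE_ENDIAN)
		 | MCDE_EXTSRC0CONF_PRI_OVLID(0)	  /* primary overlay id */
		 | MCDE_EXTSRC0CONF_BUF_NB(1)		  /* one buffer in use */
		 | MCDE_EXTSRC0CONF_BUF_ID(0);		  /* select buffer 0 */

	writel(conf, mcde_base + MCDE_EXTSRC0CONF);	  /* needs <linux/io.h> */
}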
+#define MCDE_EXTSRC1CONF 0x0000022C
+#define MCDE_EXTSRC1CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC1CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC1CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_ID, __x)
+#define MCDE_EXTSRC1CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC1CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC1CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_NB, __x)
+#define MCDE_EXTSRC1CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC1CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC1CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC1CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC1CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC1CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC1CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC1CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC1CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC1CONF_BPP_RGB444 4
+#define MCDE_EXTSRC1CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC1CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC1CONF_BPP_RGB565 7
+#define MCDE_EXTSRC1CONF_BPP_RGB888 8
+#define MCDE_EXTSRC1CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC1CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC1CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC1CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, MCDE_EXTSRC1CONF_BPP_##__x)
+#define MCDE_EXTSRC1CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, __x)
+#define MCDE_EXTSRC1CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC1CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC1CONF_BGR_RGB 0
+#define MCDE_EXTSRC1CONF_BGR_BGR 1
+#define MCDE_EXTSRC1CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, MCDE_EXTSRC1CONF_BGR_##__x)
+#define MCDE_EXTSRC1CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, __x)
+#define MCDE_EXTSRC1CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC1CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC1CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC1CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC1CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, MCDE_EXTSRC1CONF_BEBO_##__x)
+#define MCDE_EXTSRC1CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, __x)
+#define MCDE_EXTSRC1CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC1CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC1CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC1CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC1CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, MCDE_EXTSRC1CONF_BEPO_##__x)
+#define MCDE_EXTSRC1CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, __x)
+#define MCDE_EXTSRC2CONF 0x0000024C
+#define MCDE_EXTSRC2CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC2CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC2CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_ID, __x)
+#define MCDE_EXTSRC2CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC2CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC2CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_NB, __x)
+#define MCDE_EXTSRC2CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC2CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC2CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC2CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC2CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC2CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC2CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC2CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC2CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC2CONF_BPP_RGB444 4
+#define MCDE_EXTSRC2CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC2CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC2CONF_BPP_RGB565 7
+#define MCDE_EXTSRC2CONF_BPP_RGB888 8
+#define MCDE_EXTSRC2CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC2CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC2CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC2CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, MCDE_EXTSRC2CONF_BPP_##__x)
+#define MCDE_EXTSRC2CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, __x)
+#define MCDE_EXTSRC2CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC2CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC2CONF_BGR_RGB 0
+#define MCDE_EXTSRC2CONF_BGR_BGR 1
+#define MCDE_EXTSRC2CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, MCDE_EXTSRC2CONF_BGR_##__x)
+#define MCDE_EXTSRC2CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, __x)
+#define MCDE_EXTSRC2CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC2CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC2CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC2CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC2CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, MCDE_EXTSRC2CONF_BEBO_##__x)
+#define MCDE_EXTSRC2CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, __x)
+#define MCDE_EXTSRC2CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC2CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC2CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC2CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC2CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, MCDE_EXTSRC2CONF_BEPO_##__x)
+#define MCDE_EXTSRC2CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, __x)
+#define MCDE_EXTSRC3CONF 0x0000026C
+#define MCDE_EXTSRC3CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC3CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC3CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_ID, __x)
+#define MCDE_EXTSRC3CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC3CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC3CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_NB, __x)
+#define MCDE_EXTSRC3CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC3CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC3CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC3CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC3CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC3CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC3CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC3CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC3CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC3CONF_BPP_RGB444 4
+#define MCDE_EXTSRC3CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC3CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC3CONF_BPP_RGB565 7
+#define MCDE_EXTSRC3CONF_BPP_RGB888 8
+#define MCDE_EXTSRC3CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC3CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC3CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC3CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, MCDE_EXTSRC3CONF_BPP_##__x)
+#define MCDE_EXTSRC3CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, __x)
+#define MCDE_EXTSRC3CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC3CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC3CONF_BGR_RGB 0
+#define MCDE_EXTSRC3CONF_BGR_BGR 1
+#define MCDE_EXTSRC3CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, MCDE_EXTSRC3CONF_BGR_##__x)
+#define MCDE_EXTSRC3CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, __x)
+#define MCDE_EXTSRC3CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC3CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC3CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC3CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC3CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, MCDE_EXTSRC3CONF_BEBO_##__x)
+#define MCDE_EXTSRC3CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, __x)
+#define MCDE_EXTSRC3CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC3CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC3CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC3CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC3CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, MCDE_EXTSRC3CONF_BEPO_##__x)
+#define MCDE_EXTSRC3CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, __x)
+#define MCDE_EXTSRC4CONF 0x0000028C
+#define MCDE_EXTSRC4CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC4CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC4CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_ID, __x)
+#define MCDE_EXTSRC4CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC4CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC4CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_NB, __x)
+#define MCDE_EXTSRC4CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC4CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC4CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC4CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC4CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC4CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC4CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC4CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC4CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC4CONF_BPP_RGB444 4
+#define MCDE_EXTSRC4CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC4CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC4CONF_BPP_RGB565 7
+#define MCDE_EXTSRC4CONF_BPP_RGB888 8
+#define MCDE_EXTSRC4CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC4CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC4CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC4CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, MCDE_EXTSRC4CONF_BPP_##__x)
+#define MCDE_EXTSRC4CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, __x)
+#define MCDE_EXTSRC4CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC4CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC4CONF_BGR_RGB 0
+#define MCDE_EXTSRC4CONF_BGR_BGR 1
+#define MCDE_EXTSRC4CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, MCDE_EXTSRC4CONF_BGR_##__x)
+#define MCDE_EXTSRC4CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, __x)
+#define MCDE_EXTSRC4CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC4CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC4CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC4CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC4CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, MCDE_EXTSRC4CONF_BEBO_##__x)
+#define MCDE_EXTSRC4CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, __x)
+#define MCDE_EXTSRC4CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC4CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC4CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC4CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC4CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, MCDE_EXTSRC4CONF_BEPO_##__x)
+#define MCDE_EXTSRC4CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, __x)
+#define MCDE_EXTSRC5CONF 0x000002AC
+#define MCDE_EXTSRC5CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC5CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC5CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_ID, __x)
+#define MCDE_EXTSRC5CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC5CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC5CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_NB, __x)
+#define MCDE_EXTSRC5CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC5CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC5CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC5CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC5CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC5CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC5CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC5CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC5CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC5CONF_BPP_RGB444 4
+#define MCDE_EXTSRC5CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC5CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC5CONF_BPP_RGB565 7
+#define MCDE_EXTSRC5CONF_BPP_RGB888 8
+#define MCDE_EXTSRC5CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC5CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC5CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC5CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, MCDE_EXTSRC5CONF_BPP_##__x)
+#define MCDE_EXTSRC5CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, __x)
+#define MCDE_EXTSRC5CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC5CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC5CONF_BGR_RGB 0
+#define MCDE_EXTSRC5CONF_BGR_BGR 1
+#define MCDE_EXTSRC5CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, MCDE_EXTSRC5CONF_BGR_##__x)
+#define MCDE_EXTSRC5CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, __x)
+#define MCDE_EXTSRC5CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC5CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC5CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC5CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC5CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, MCDE_EXTSRC5CONF_BEBO_##__x)
+#define MCDE_EXTSRC5CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, __x)
+#define MCDE_EXTSRC5CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC5CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC5CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC5CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC5CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, MCDE_EXTSRC5CONF_BEPO_##__x)
+#define MCDE_EXTSRC5CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, __x)
+#define MCDE_EXTSRC6CONF 0x000002CC
+#define MCDE_EXTSRC6CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC6CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC6CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_ID, __x)
+#define MCDE_EXTSRC6CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC6CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC6CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_NB, __x)
+#define MCDE_EXTSRC6CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC6CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC6CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC6CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC6CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC6CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC6CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC6CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC6CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC6CONF_BPP_RGB444 4
+#define MCDE_EXTSRC6CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC6CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC6CONF_BPP_RGB565 7
+#define MCDE_EXTSRC6CONF_BPP_RGB888 8
+#define MCDE_EXTSRC6CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC6CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC6CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC6CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, MCDE_EXTSRC6CONF_BPP_##__x)
+#define MCDE_EXTSRC6CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, __x)
+#define MCDE_EXTSRC6CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC6CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC6CONF_BGR_RGB 0
+#define MCDE_EXTSRC6CONF_BGR_BGR 1
+#define MCDE_EXTSRC6CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, MCDE_EXTSRC6CONF_BGR_##__x)
+#define MCDE_EXTSRC6CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, __x)
+#define MCDE_EXTSRC6CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC6CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC6CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC6CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC6CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, MCDE_EXTSRC6CONF_BEBO_##__x)
+#define MCDE_EXTSRC6CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, __x)
+#define MCDE_EXTSRC6CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC6CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC6CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC6CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC6CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, MCDE_EXTSRC6CONF_BEPO_##__x)
+#define MCDE_EXTSRC6CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, __x)
+#define MCDE_EXTSRC7CONF 0x000002EC
+#define MCDE_EXTSRC7CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC7CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC7CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_ID, __x)
+#define MCDE_EXTSRC7CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC7CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC7CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_NB, __x)
+#define MCDE_EXTSRC7CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC7CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC7CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC7CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC7CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC7CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC7CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC7CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC7CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC7CONF_BPP_RGB444 4
+#define MCDE_EXTSRC7CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC7CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC7CONF_BPP_RGB565 7
+#define MCDE_EXTSRC7CONF_BPP_RGB888 8
+#define MCDE_EXTSRC7CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC7CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC7CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC7CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, MCDE_EXTSRC7CONF_BPP_##__x)
+#define MCDE_EXTSRC7CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, __x)
+#define MCDE_EXTSRC7CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC7CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC7CONF_BGR_RGB 0
+#define MCDE_EXTSRC7CONF_BGR_BGR 1
+#define MCDE_EXTSRC7CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, MCDE_EXTSRC7CONF_BGR_##__x)
+#define MCDE_EXTSRC7CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, __x)
+#define MCDE_EXTSRC7CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC7CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC7CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC7CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC7CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, MCDE_EXTSRC7CONF_BEBO_##__x)
+#define MCDE_EXTSRC7CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, __x)
+#define MCDE_EXTSRC7CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC7CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC7CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC7CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC7CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, MCDE_EXTSRC7CONF_BEPO_##__x)
+#define MCDE_EXTSRC7CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, __x)
+#define MCDE_EXTSRC8CONF 0x0000030C
+#define MCDE_EXTSRC8CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC8CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC8CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_ID, __x)
+#define MCDE_EXTSRC8CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC8CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC8CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_NB, __x)
+#define MCDE_EXTSRC8CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC8CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC8CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC8CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC8CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC8CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC8CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC8CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC8CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC8CONF_BPP_RGB444 4
+#define MCDE_EXTSRC8CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC8CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC8CONF_BPP_RGB565 7
+#define MCDE_EXTSRC8CONF_BPP_RGB888 8
+#define MCDE_EXTSRC8CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC8CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC8CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC8CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, MCDE_EXTSRC8CONF_BPP_##__x)
+#define MCDE_EXTSRC8CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, __x)
+#define MCDE_EXTSRC8CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC8CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC8CONF_BGR_RGB 0
+#define MCDE_EXTSRC8CONF_BGR_BGR 1
+#define MCDE_EXTSRC8CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, MCDE_EXTSRC8CONF_BGR_##__x)
+#define MCDE_EXTSRC8CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, __x)
+#define MCDE_EXTSRC8CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC8CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC8CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC8CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC8CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, MCDE_EXTSRC8CONF_BEBO_##__x)
+#define MCDE_EXTSRC8CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, __x)
+#define MCDE_EXTSRC8CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC8CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC8CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC8CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC8CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, MCDE_EXTSRC8CONF_BEPO_##__x)
+#define MCDE_EXTSRC8CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, __x)
+#define MCDE_EXTSRC9CONF 0x0000032C
+#define MCDE_EXTSRC9CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC9CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC9CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_ID, __x)
+#define MCDE_EXTSRC9CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC9CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC9CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_NB, __x)
+#define MCDE_EXTSRC9CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC9CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC9CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC9CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC9CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC9CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC9CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC9CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC9CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC9CONF_BPP_RGB444 4
+#define MCDE_EXTSRC9CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC9CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC9CONF_BPP_RGB565 7
+#define MCDE_EXTSRC9CONF_BPP_RGB888 8
+#define MCDE_EXTSRC9CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC9CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC9CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC9CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, MCDE_EXTSRC9CONF_BPP_##__x)
+#define MCDE_EXTSRC9CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, __x)
+#define MCDE_EXTSRC9CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC9CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC9CONF_BGR_RGB 0
+#define MCDE_EXTSRC9CONF_BGR_BGR 1
+#define MCDE_EXTSRC9CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, MCDE_EXTSRC9CONF_BGR_##__x)
+#define MCDE_EXTSRC9CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, __x)
+#define MCDE_EXTSRC9CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC9CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC9CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC9CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC9CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, MCDE_EXTSRC9CONF_BEBO_##__x)
+#define MCDE_EXTSRC9CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, __x)
+#define MCDE_EXTSRC9CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC9CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC9CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC9CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC9CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, MCDE_EXTSRC9CONF_BEPO_##__x)
+#define MCDE_EXTSRC9CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, __x)
+#define MCDE_EXTSRC0CR 0x00000210
+#define MCDE_EXTSRC0CR_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC0CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC0CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC0CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, MCDE_EXTSRC0CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC0CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, __x)
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC0CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC1CR 0x00000230
+#define MCDE_EXTSRC1CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC1CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC1CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC1CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC1CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC1CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, MCDE_EXTSRC1CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC1CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, __x)
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC1CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC2CR 0x00000250
+#define MCDE_EXTSRC2CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC2CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC2CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC2CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC2CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC2CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, MCDE_EXTSRC2CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC2CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, __x)
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC2CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC3CR 0x00000270
+#define MCDE_EXTSRC3CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC3CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC3CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC3CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC3CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC3CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, MCDE_EXTSRC3CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC3CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, __x)
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC3CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC4CR 0x00000290
+#define MCDE_EXTSRC4CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC4CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC4CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC4CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC4CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC4CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, MCDE_EXTSRC4CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC4CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, __x)
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC4CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC5CR 0x000002B0
+#define MCDE_EXTSRC5CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC5CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC5CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC5CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC5CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC5CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, MCDE_EXTSRC5CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC5CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, __x)
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC5CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC6CR 0x000002D0
+#define MCDE_EXTSRC6CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC6CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC6CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC6CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC6CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC6CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, MCDE_EXTSRC6CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC6CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, __x)
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC6CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC7CR 0x000002F0
+#define MCDE_EXTSRC7CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC7CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC7CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC7CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC7CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC7CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, MCDE_EXTSRC7CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC7CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, __x)
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC7CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC8CR 0x00000310
+#define MCDE_EXTSRC8CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC8CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC8CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC8CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC8CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC8CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, MCDE_EXTSRC8CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC8CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, __x)
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC8CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC9CR 0x00000330
+#define MCDE_EXTSRC9CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC9CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC9CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC9CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC9CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC9CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, MCDE_EXTSRC9CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC9CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, __x)
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC9CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, FORCE_FS_DIV, __x)
+#define MCDE_OVL0CR 0x00000400
+#define MCDE_OVL0CR_GROUPOFFSET 0x20
+#define MCDE_OVL0CR_OVLEN_SHIFT 0
+#define MCDE_OVL0CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL0CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLEN, __x)
+#define MCDE_OVL0CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL0CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL0CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL0CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL0CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, MCDE_OVL0CR_COLCCTRL_##__x)
+#define MCDE_OVL0CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, __x)
+#define MCDE_OVL0CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL0CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL0CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, CKEYGEN, __x)
+#define MCDE_OVL0CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL0CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL0CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ALPHAPMEN, __x)
+#define MCDE_OVL0CR_OVLF_SHIFT 5
+#define MCDE_OVL0CR_OVLF_MASK 0x00000020
+#define MCDE_OVL0CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLF, __x)
+#define MCDE_OVL0CR_OVLR_SHIFT 6
+#define MCDE_OVL0CR_OVLR_MASK 0x00000040
+#define MCDE_OVL0CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLR, __x)
+#define MCDE_OVL0CR_OVLB_SHIFT 7
+#define MCDE_OVL0CR_OVLB_MASK 0x00000080
+#define MCDE_OVL0CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLB, __x)
+#define MCDE_OVL0CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL0CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL0CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, FETCH_ROPC, __x)
+#define MCDE_OVL0CR_STBPRIO_SHIFT 16
+#define MCDE_OVL0CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL0CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, STBPRIO, __x)
+#define MCDE_OVL0CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL0CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL0CR_BURSTSIZE_1W 0
+#define MCDE_OVL0CR_BURSTSIZE_2W 1
+#define MCDE_OVL0CR_BURSTSIZE_4W 2
+#define MCDE_OVL0CR_BURSTSIZE_8W 3
+#define MCDE_OVL0CR_BURSTSIZE_16W 4
+#define MCDE_OVL0CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL0CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL0CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL0CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL0CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL0CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, MCDE_OVL0CR_BURSTSIZE_##__x)
+#define MCDE_OVL0CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, __x)
+#define MCDE_OVL0CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL0CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL0CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL0CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL0CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL0CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL0CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL0CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, \
+ MCDE_OVL0CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL0CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL0CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL0CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL0CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL0CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL0CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL0CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL0CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL0CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, MCDE_OVL0CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL0CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL1CR 0x00000420
+#define MCDE_OVL1CR_OVLEN_SHIFT 0
+#define MCDE_OVL1CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL1CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLEN, __x)
+#define MCDE_OVL1CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL1CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL1CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL1CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL1CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL1CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, MCDE_OVL1CR_COLCCTRL_##__x)
+#define MCDE_OVL1CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, __x)
+#define MCDE_OVL1CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL1CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL1CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, CKEYGEN, __x)
+#define MCDE_OVL1CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL1CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL1CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ALPHAPMEN, __x)
+#define MCDE_OVL1CR_OVLF_SHIFT 5
+#define MCDE_OVL1CR_OVLF_MASK 0x00000020
+#define MCDE_OVL1CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLF, __x)
+#define MCDE_OVL1CR_OVLR_SHIFT 6
+#define MCDE_OVL1CR_OVLR_MASK 0x00000040
+#define MCDE_OVL1CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLR, __x)
+#define MCDE_OVL1CR_OVLB_SHIFT 7
+#define MCDE_OVL1CR_OVLB_MASK 0x00000080
+#define MCDE_OVL1CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLB, __x)
+#define MCDE_OVL1CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL1CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL1CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, FETCH_ROPC, __x)
+#define MCDE_OVL1CR_STBPRIO_SHIFT 16
+#define MCDE_OVL1CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL1CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, STBPRIO, __x)
+#define MCDE_OVL1CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL1CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL1CR_BURSTSIZE_1W 0
+#define MCDE_OVL1CR_BURSTSIZE_2W 1
+#define MCDE_OVL1CR_BURSTSIZE_4W 2
+#define MCDE_OVL1CR_BURSTSIZE_8W 3
+#define MCDE_OVL1CR_BURSTSIZE_16W 4
+#define MCDE_OVL1CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL1CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL1CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL1CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL1CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL1CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, MCDE_OVL1CR_BURSTSIZE_##__x)
+#define MCDE_OVL1CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, __x)
+#define MCDE_OVL1CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL1CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL1CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL1CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL1CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL1CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL1CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL1CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, \
+ MCDE_OVL1CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL1CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL1CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL1CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL1CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL1CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL1CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL1CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL1CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL1CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, MCDE_OVL1CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL1CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL2CR 0x00000440
+#define MCDE_OVL2CR_OVLEN_SHIFT 0
+#define MCDE_OVL2CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL2CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLEN, __x)
+#define MCDE_OVL2CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL2CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL2CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL2CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL2CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL2CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, MCDE_OVL2CR_COLCCTRL_##__x)
+#define MCDE_OVL2CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, __x)
+#define MCDE_OVL2CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL2CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL2CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, CKEYGEN, __x)
+#define MCDE_OVL2CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL2CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL2CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ALPHAPMEN, __x)
+#define MCDE_OVL2CR_OVLF_SHIFT 5
+#define MCDE_OVL2CR_OVLF_MASK 0x00000020
+#define MCDE_OVL2CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLF, __x)
+#define MCDE_OVL2CR_OVLR_SHIFT 6
+#define MCDE_OVL2CR_OVLR_MASK 0x00000040
+#define MCDE_OVL2CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLR, __x)
+#define MCDE_OVL2CR_OVLB_SHIFT 7
+#define MCDE_OVL2CR_OVLB_MASK 0x00000080
+#define MCDE_OVL2CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLB, __x)
+#define MCDE_OVL2CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL2CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL2CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, FETCH_ROPC, __x)
+#define MCDE_OVL2CR_STBPRIO_SHIFT 16
+#define MCDE_OVL2CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL2CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, STBPRIO, __x)
+#define MCDE_OVL2CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL2CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL2CR_BURSTSIZE_1W 0
+#define MCDE_OVL2CR_BURSTSIZE_2W 1
+#define MCDE_OVL2CR_BURSTSIZE_4W 2
+#define MCDE_OVL2CR_BURSTSIZE_8W 3
+#define MCDE_OVL2CR_BURSTSIZE_16W 4
+#define MCDE_OVL2CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL2CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL2CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL2CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL2CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL2CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, MCDE_OVL2CR_BURSTSIZE_##__x)
+#define MCDE_OVL2CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, __x)
+#define MCDE_OVL2CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL2CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL2CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL2CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL2CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL2CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL2CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL2CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, \
+ MCDE_OVL2CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL2CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL2CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL2CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL2CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL2CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL2CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL2CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL2CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL2CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, MCDE_OVL2CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL2CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL3CR 0x00000460
+#define MCDE_OVL3CR_OVLEN_SHIFT 0
+#define MCDE_OVL3CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL3CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLEN, __x)
+#define MCDE_OVL3CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL3CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL3CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL3CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL3CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL3CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, MCDE_OVL3CR_COLCCTRL_##__x)
+#define MCDE_OVL3CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, __x)
+#define MCDE_OVL3CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL3CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL3CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, CKEYGEN, __x)
+#define MCDE_OVL3CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL3CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL3CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ALPHAPMEN, __x)
+#define MCDE_OVL3CR_OVLF_SHIFT 5
+#define MCDE_OVL3CR_OVLF_MASK 0x00000020
+#define MCDE_OVL3CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLF, __x)
+#define MCDE_OVL3CR_OVLR_SHIFT 6
+#define MCDE_OVL3CR_OVLR_MASK 0x00000040
+#define MCDE_OVL3CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLR, __x)
+#define MCDE_OVL3CR_OVLB_SHIFT 7
+#define MCDE_OVL3CR_OVLB_MASK 0x00000080
+#define MCDE_OVL3CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLB, __x)
+#define MCDE_OVL3CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL3CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL3CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, FETCH_ROPC, __x)
+#define MCDE_OVL3CR_STBPRIO_SHIFT 16
+#define MCDE_OVL3CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL3CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, STBPRIO, __x)
+#define MCDE_OVL3CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL3CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL3CR_BURSTSIZE_1W 0
+#define MCDE_OVL3CR_BURSTSIZE_2W 1
+#define MCDE_OVL3CR_BURSTSIZE_4W 2
+#define MCDE_OVL3CR_BURSTSIZE_8W 3
+#define MCDE_OVL3CR_BURSTSIZE_16W 4
+#define MCDE_OVL3CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL3CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL3CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL3CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL3CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL3CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, MCDE_OVL3CR_BURSTSIZE_##__x)
+#define MCDE_OVL3CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, __x)
+#define MCDE_OVL3CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL3CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL3CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL3CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL3CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL3CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL3CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL3CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, \
+ MCDE_OVL3CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL3CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL3CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL3CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL3CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL3CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL3CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL3CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL3CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL3CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, MCDE_OVL3CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL3CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL4CR 0x00000480
+#define MCDE_OVL4CR_OVLEN_SHIFT 0
+#define MCDE_OVL4CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL4CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLEN, __x)
+#define MCDE_OVL4CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL4CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL4CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL4CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL4CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL4CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, MCDE_OVL4CR_COLCCTRL_##__x)
+#define MCDE_OVL4CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, __x)
+#define MCDE_OVL4CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL4CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL4CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, CKEYGEN, __x)
+#define MCDE_OVL4CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL4CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL4CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ALPHAPMEN, __x)
+#define MCDE_OVL4CR_OVLF_SHIFT 5
+#define MCDE_OVL4CR_OVLF_MASK 0x00000020
+#define MCDE_OVL4CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLF, __x)
+#define MCDE_OVL4CR_OVLR_SHIFT 6
+#define MCDE_OVL4CR_OVLR_MASK 0x00000040
+#define MCDE_OVL4CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLR, __x)
+#define MCDE_OVL4CR_OVLB_SHIFT 7
+#define MCDE_OVL4CR_OVLB_MASK 0x00000080
+#define MCDE_OVL4CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLB, __x)
+#define MCDE_OVL4CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL4CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL4CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, FETCH_ROPC, __x)
+#define MCDE_OVL4CR_STBPRIO_SHIFT 16
+#define MCDE_OVL4CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL4CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, STBPRIO, __x)
+#define MCDE_OVL4CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL4CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL4CR_BURSTSIZE_1W 0
+#define MCDE_OVL4CR_BURSTSIZE_2W 1
+#define MCDE_OVL4CR_BURSTSIZE_4W 2
+#define MCDE_OVL4CR_BURSTSIZE_8W 3
+#define MCDE_OVL4CR_BURSTSIZE_16W 4
+#define MCDE_OVL4CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL4CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL4CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL4CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL4CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL4CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, MCDE_OVL4CR_BURSTSIZE_##__x)
+#define MCDE_OVL4CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, __x)
+#define MCDE_OVL4CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL4CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL4CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL4CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL4CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL4CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL4CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL4CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, \
+ MCDE_OVL4CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL4CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL4CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL4CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL4CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL4CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL4CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL4CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL4CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL4CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, MCDE_OVL4CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL4CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL5CR 0x000004A0
+#define MCDE_OVL5CR_OVLEN_SHIFT 0
+#define MCDE_OVL5CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL5CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLEN, __x)
+#define MCDE_OVL5CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL5CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL5CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL5CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL5CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL5CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, MCDE_OVL5CR_COLCCTRL_##__x)
+#define MCDE_OVL5CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, __x)
+#define MCDE_OVL5CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL5CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL5CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, CKEYGEN, __x)
+#define MCDE_OVL5CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL5CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL5CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ALPHAPMEN, __x)
+#define MCDE_OVL5CR_OVLF_SHIFT 5
+#define MCDE_OVL5CR_OVLF_MASK 0x00000020
+#define MCDE_OVL5CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLF, __x)
+#define MCDE_OVL5CR_OVLR_SHIFT 6
+#define MCDE_OVL5CR_OVLR_MASK 0x00000040
+#define MCDE_OVL5CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLR, __x)
+#define MCDE_OVL5CR_OVLB_SHIFT 7
+#define MCDE_OVL5CR_OVLB_MASK 0x00000080
+#define MCDE_OVL5CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLB, __x)
+#define MCDE_OVL5CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL5CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL5CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, FETCH_ROPC, __x)
+#define MCDE_OVL5CR_STBPRIO_SHIFT 16
+#define MCDE_OVL5CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL5CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, STBPRIO, __x)
+#define MCDE_OVL5CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL5CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL5CR_BURSTSIZE_1W 0
+#define MCDE_OVL5CR_BURSTSIZE_2W 1
+#define MCDE_OVL5CR_BURSTSIZE_4W 2
+#define MCDE_OVL5CR_BURSTSIZE_8W 3
+#define MCDE_OVL5CR_BURSTSIZE_16W 4
+#define MCDE_OVL5CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL5CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL5CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL5CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL5CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL5CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, MCDE_OVL5CR_BURSTSIZE_##__x)
+#define MCDE_OVL5CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, __x)
+#define MCDE_OVL5CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL5CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL5CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL5CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL5CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL5CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL5CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL5CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, \
+ MCDE_OVL5CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL5CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL5CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL5CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL5CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL5CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL5CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL5CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL5CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL5CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, MCDE_OVL5CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL5CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL0CONF 0x00000404
+#define MCDE_OVL0CONF_GROUPOFFSET 0x20
+#define MCDE_OVL0CONF_PPL_SHIFT 0
+#define MCDE_OVL0CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL0CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, PPL, __x)
+#define MCDE_OVL0CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL0CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL0CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, EXTSRC_ID, __x)
+#define MCDE_OVL0CONF_LPF_SHIFT 16
+#define MCDE_OVL0CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL0CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, LPF, __x)
+#define MCDE_OVL1CONF 0x00000424
+#define MCDE_OVL1CONF_PPL_SHIFT 0
+#define MCDE_OVL1CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL1CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, PPL, __x)
+#define MCDE_OVL1CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL1CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL1CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, EXTSRC_ID, __x)
+#define MCDE_OVL1CONF_LPF_SHIFT 16
+#define MCDE_OVL1CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL1CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, LPF, __x)
+#define MCDE_OVL2CONF 0x00000444
+#define MCDE_OVL2CONF_PPL_SHIFT 0
+#define MCDE_OVL2CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL2CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, PPL, __x)
+#define MCDE_OVL2CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL2CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL2CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, EXTSRC_ID, __x)
+#define MCDE_OVL2CONF_LPF_SHIFT 16
+#define MCDE_OVL2CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL2CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, LPF, __x)
+#define MCDE_OVL3CONF 0x00000464
+#define MCDE_OVL3CONF_PPL_SHIFT 0
+#define MCDE_OVL3CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL3CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, PPL, __x)
+#define MCDE_OVL3CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL3CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL3CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, EXTSRC_ID, __x)
+#define MCDE_OVL3CONF_LPF_SHIFT 16
+#define MCDE_OVL3CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL3CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, LPF, __x)
+#define MCDE_OVL4CONF 0x00000484
+#define MCDE_OVL4CONF_PPL_SHIFT 0
+#define MCDE_OVL4CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL4CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, PPL, __x)
+#define MCDE_OVL4CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL4CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL4CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, EXTSRC_ID, __x)
+#define MCDE_OVL4CONF_LPF_SHIFT 16
+#define MCDE_OVL4CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL4CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, LPF, __x)
+#define MCDE_OVL5CONF 0x000004A4
+#define MCDE_OVL5CONF_PPL_SHIFT 0
+#define MCDE_OVL5CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL5CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, PPL, __x)
+#define MCDE_OVL5CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL5CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL5CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, EXTSRC_ID, __x)
+#define MCDE_OVL5CONF_LPF_SHIFT 16
+#define MCDE_OVL5CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL5CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, LPF, __x)
+#define MCDE_OVL0CONF2 0x00000408
+#define MCDE_OVL0CONF2_GROUPOFFSET 0x20
+#define MCDE_OVL0CONF2_BP_SHIFT 0
+#define MCDE_OVL0CONF2_BP_MASK 0x00000001
+#define MCDE_OVL0CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL0CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL0CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, BP, MCDE_OVL0CONF2_BP_##__x)
+#define MCDE_OVL0CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, BP, __x)
+#define MCDE_OVL0CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL0CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL0CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL0CONF2_OPQ_SHIFT 9
+#define MCDE_OVL0CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL0CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, OPQ, __x)
+#define MCDE_OVL0CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL0CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL0CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, PIXOFF, __x)
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL1CONF2 0x00000428
+#define MCDE_OVL1CONF2_BP_SHIFT 0
+#define MCDE_OVL1CONF2_BP_MASK 0x00000001
+#define MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL1CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL1CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, BP, MCDE_OVL1CONF2_BP_##__x)
+#define MCDE_OVL1CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, BP, __x)
+#define MCDE_OVL1CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL1CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL1CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL1CONF2_OPQ_SHIFT 9
+#define MCDE_OVL1CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL1CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, OPQ, __x)
+#define MCDE_OVL1CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL1CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL1CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, PIXOFF, __x)
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL2CONF2 0x00000448
+#define MCDE_OVL2CONF2_BP_SHIFT 0
+#define MCDE_OVL2CONF2_BP_MASK 0x00000001
+#define MCDE_OVL2CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL2CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL2CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, BP, MCDE_OVL2CONF2_BP_##__x)
+#define MCDE_OVL2CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, BP, __x)
+#define MCDE_OVL2CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL2CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL2CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL2CONF2_OPQ_SHIFT 9
+#define MCDE_OVL2CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL2CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, OPQ, __x)
+#define MCDE_OVL2CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL2CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL2CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, PIXOFF, __x)
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL3CONF2 0x00000468
+#define MCDE_OVL3CONF2_BP_SHIFT 0
+#define MCDE_OVL3CONF2_BP_MASK 0x00000001
+#define MCDE_OVL3CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL3CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL3CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, BP, MCDE_OVL3CONF2_BP_##__x)
+#define MCDE_OVL3CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, BP, __x)
+#define MCDE_OVL3CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL3CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL3CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL3CONF2_OPQ_SHIFT 9
+#define MCDE_OVL3CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL3CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, OPQ, __x)
+#define MCDE_OVL3CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL3CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL3CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, PIXOFF, __x)
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL4CONF2 0x00000488
+#define MCDE_OVL4CONF2_BP_SHIFT 0
+#define MCDE_OVL4CONF2_BP_MASK 0x00000001
+#define MCDE_OVL4CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL4CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL4CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, BP, MCDE_OVL4CONF2_BP_##__x)
+#define MCDE_OVL4CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, BP, __x)
+#define MCDE_OVL4CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL4CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL4CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL4CONF2_OPQ_SHIFT 9
+#define MCDE_OVL4CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL4CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, OPQ, __x)
+#define MCDE_OVL4CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL4CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL4CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, PIXOFF, __x)
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL5CONF2 0x000004A8
+#define MCDE_OVL5CONF2_BP_SHIFT 0
+#define MCDE_OVL5CONF2_BP_MASK 0x00000001
+#define MCDE_OVL5CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL5CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL5CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, BP, MCDE_OVL5CONF2_BP_##__x)
+#define MCDE_OVL5CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, BP, __x)
+#define MCDE_OVL5CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL5CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL5CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL5CONF2_OPQ_SHIFT 9
+#define MCDE_OVL5CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL5CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, OPQ, __x)
+#define MCDE_OVL5CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL5CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL5CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, PIXOFF, __x)
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL0LJINC 0x0000040C
+#define MCDE_OVL0LJINC_GROUPOFFSET 0x20
+#define MCDE_OVL0LJINC_LJINC_SHIFT 0
+#define MCDE_OVL0LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL0LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL0LJINC, LJINC, __x)
+#define MCDE_OVL1LJINC 0x0000042C
+#define MCDE_OVL1LJINC_LJINC_SHIFT 0
+#define MCDE_OVL1LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL1LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL1LJINC, LJINC, __x)
+#define MCDE_OVL2LJINC 0x0000044C
+#define MCDE_OVL2LJINC_LJINC_SHIFT 0
+#define MCDE_OVL2LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL2LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL2LJINC, LJINC, __x)
+#define MCDE_OVL3LJINC 0x0000046C
+#define MCDE_OVL3LJINC_LJINC_SHIFT 0
+#define MCDE_OVL3LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL3LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL3LJINC, LJINC, __x)
+#define MCDE_OVL4LJINC 0x0000048C
+#define MCDE_OVL4LJINC_LJINC_SHIFT 0
+#define MCDE_OVL4LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL4LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL4LJINC, LJINC, __x)
+#define MCDE_OVL5LJINC 0x000004AC
+#define MCDE_OVL5LJINC_LJINC_SHIFT 0
+#define MCDE_OVL5LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL5LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL5LJINC, LJINC, __x)
+#define MCDE_OVL0CROP 0x00000410
+#define MCDE_OVL0CROP_GROUPOFFSET 0x20
+#define MCDE_OVL0CROP_TMRGN_SHIFT 0
+#define MCDE_OVL0CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL0CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CROP, TMRGN, __x)
+#define MCDE_OVL0CROP_LMRGN_SHIFT 22
+#define MCDE_OVL0CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL0CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CROP, LMRGN, __x)
+#define MCDE_OVL1CROP 0x00000430
+#define MCDE_OVL1CROP_TMRGN_SHIFT 0
+#define MCDE_OVL1CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL1CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CROP, TMRGN, __x)
+#define MCDE_OVL1CROP_LMRGN_SHIFT 22
+#define MCDE_OVL1CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL1CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CROP, LMRGN, __x)
+#define MCDE_OVL2CROP 0x00000450
+#define MCDE_OVL2CROP_TMRGN_SHIFT 0
+#define MCDE_OVL2CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL2CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CROP, TMRGN, __x)
+#define MCDE_OVL2CROP_LMRGN_SHIFT 22
+#define MCDE_OVL2CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL2CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CROP, LMRGN, __x)
+#define MCDE_OVL3CROP 0x00000470
+#define MCDE_OVL3CROP_TMRGN_SHIFT 0
+#define MCDE_OVL3CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL3CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CROP, TMRGN, __x)
+#define MCDE_OVL3CROP_LMRGN_SHIFT 22
+#define MCDE_OVL3CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL3CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CROP, LMRGN, __x)
+#define MCDE_OVL4CROP 0x00000490
+#define MCDE_OVL4CROP_TMRGN_SHIFT 0
+#define MCDE_OVL4CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL4CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CROP, TMRGN, __x)
+#define MCDE_OVL4CROP_LMRGN_SHIFT 22
+#define MCDE_OVL4CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL4CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CROP, LMRGN, __x)
+#define MCDE_OVL5CROP 0x000004B0
+#define MCDE_OVL5CROP_TMRGN_SHIFT 0
+#define MCDE_OVL5CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL5CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CROP, TMRGN, __x)
+#define MCDE_OVL5CROP_LMRGN_SHIFT 22
+#define MCDE_OVL5CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL5CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CROP, LMRGN, __x)
+#define MCDE_OVL0COMP 0x00000414
+#define MCDE_OVL0COMP_GROUPOFFSET 0x20
+#define MCDE_OVL0COMP_XPOS_SHIFT 0
+#define MCDE_OVL0COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL0COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, XPOS, __x)
+#define MCDE_OVL0COMP_CH_ID_SHIFT 11
+#define MCDE_OVL0COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL0COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, CH_ID, __x)
+#define MCDE_OVL0COMP_YPOS_SHIFT 16
+#define MCDE_OVL0COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL0COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, YPOS, __x)
+#define MCDE_OVL0COMP_Z_SHIFT 27
+#define MCDE_OVL0COMP_Z_MASK 0x78000000
+#define MCDE_OVL0COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, Z, __x)
+#define MCDE_OVL1COMP 0x00000434
+#define MCDE_OVL1COMP_XPOS_SHIFT 0
+#define MCDE_OVL1COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL1COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, XPOS, __x)
+#define MCDE_OVL1COMP_CH_ID_SHIFT 11
+#define MCDE_OVL1COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL1COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, CH_ID, __x)
+#define MCDE_OVL1COMP_YPOS_SHIFT 16
+#define MCDE_OVL1COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL1COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, YPOS, __x)
+#define MCDE_OVL1COMP_Z_SHIFT 27
+#define MCDE_OVL1COMP_Z_MASK 0x78000000
+#define MCDE_OVL1COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, Z, __x)
+#define MCDE_OVL2COMP 0x00000454
+#define MCDE_OVL2COMP_XPOS_SHIFT 0
+#define MCDE_OVL2COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL2COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, XPOS, __x)
+#define MCDE_OVL2COMP_CH_ID_SHIFT 11
+#define MCDE_OVL2COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL2COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, CH_ID, __x)
+#define MCDE_OVL2COMP_YPOS_SHIFT 16
+#define MCDE_OVL2COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL2COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, YPOS, __x)
+#define MCDE_OVL2COMP_Z_SHIFT 27
+#define MCDE_OVL2COMP_Z_MASK 0x78000000
+#define MCDE_OVL2COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, Z, __x)
+#define MCDE_OVL3COMP 0x00000474
+#define MCDE_OVL3COMP_XPOS_SHIFT 0
+#define MCDE_OVL3COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL3COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, XPOS, __x)
+#define MCDE_OVL3COMP_CH_ID_SHIFT 11
+#define MCDE_OVL3COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL3COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, CH_ID, __x)
+#define MCDE_OVL3COMP_YPOS_SHIFT 16
+#define MCDE_OVL3COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL3COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, YPOS, __x)
+#define MCDE_OVL3COMP_Z_SHIFT 27
+#define MCDE_OVL3COMP_Z_MASK 0x78000000
+#define MCDE_OVL3COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, Z, __x)
+#define MCDE_OVL4COMP 0x00000494
+#define MCDE_OVL4COMP_XPOS_SHIFT 0
+#define MCDE_OVL4COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL4COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, XPOS, __x)
+#define MCDE_OVL4COMP_CH_ID_SHIFT 11
+#define MCDE_OVL4COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL4COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, CH_ID, __x)
+#define MCDE_OVL4COMP_YPOS_SHIFT 16
+#define MCDE_OVL4COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL4COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, YPOS, __x)
+#define MCDE_OVL4COMP_Z_SHIFT 27
+#define MCDE_OVL4COMP_Z_MASK 0x78000000
+#define MCDE_OVL4COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, Z, __x)
+#define MCDE_OVL5COMP 0x000004B4
+#define MCDE_OVL5COMP_XPOS_SHIFT 0
+#define MCDE_OVL5COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL5COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, XPOS, __x)
+#define MCDE_OVL5COMP_CH_ID_SHIFT 11
+#define MCDE_OVL5COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL5COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, CH_ID, __x)
+#define MCDE_OVL5COMP_YPOS_SHIFT 16
+#define MCDE_OVL5COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL5COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, YPOS, __x)
+#define MCDE_OVL5COMP_Z_SHIFT 27
+#define MCDE_OVL5COMP_Z_MASK 0x78000000
+#define MCDE_OVL5COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, Z, __x)
+#define MCDE_CHNL0CONF 0x00000600
+#define MCDE_CHNL0CONF_GROUPOFFSET 0x20
+#define MCDE_CHNL0CONF_PPL_SHIFT 0
+#define MCDE_CHNL0CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL0CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0CONF, PPL, __x)
+#define MCDE_CHNL0CONF_LPF_SHIFT 16
+#define MCDE_CHNL0CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL0CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0CONF, LPF, __x)
+#define MCDE_CHNL1CONF 0x00000620
+#define MCDE_CHNL1CONF_PPL_SHIFT 0
+#define MCDE_CHNL1CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL1CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1CONF, PPL, __x)
+#define MCDE_CHNL1CONF_LPF_SHIFT 16
+#define MCDE_CHNL1CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL1CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1CONF, LPF, __x)
+#define MCDE_CHNL2CONF 0x00000640
+#define MCDE_CHNL2CONF_PPL_SHIFT 0
+#define MCDE_CHNL2CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL2CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2CONF, PPL, __x)
+#define MCDE_CHNL2CONF_LPF_SHIFT 16
+#define MCDE_CHNL2CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL2CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2CONF, LPF, __x)
+#define MCDE_CHNL3CONF 0x00000660
+#define MCDE_CHNL3CONF_PPL_SHIFT 0
+#define MCDE_CHNL3CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL3CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3CONF, PPL, __x)
+#define MCDE_CHNL3CONF_LPF_SHIFT 16
+#define MCDE_CHNL3CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL3CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3CONF, LPF, __x)
+#define MCDE_CHNL0STAT 0x00000604
+#define MCDE_CHNL0STAT_GROUPOFFSET 0x20
+#define MCDE_CHNL0STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL0STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL0STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLRD, __x)
+#define MCDE_CHNL0STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL0STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL0STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLA, __x)
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL1STAT 0x00000624
+#define MCDE_CHNL1STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL1STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL1STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLRD, __x)
+#define MCDE_CHNL1STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL1STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL1STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLA, __x)
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL2STAT 0x00000644
+#define MCDE_CHNL2STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL2STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL2STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLRD, __x)
+#define MCDE_CHNL2STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL2STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL2STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLA, __x)
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL3STAT 0x00000664
+#define MCDE_CHNL3STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL3STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL3STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLRD, __x)
+#define MCDE_CHNL3STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL3STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL3STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLA, __x)
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL0SYNCHMOD 0x00000608
+#define MCDE_CHNL0SYNCHMOD_GROUPOFFSET 0x20
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_OUTPUT 0
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_AUTO 1
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_EXTERNAL 3
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_VSYNC0 1
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_VSYNC1 2
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL1SYNCHMOD 0x00000628
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_OUTPUT 0
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_AUTO 1
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_EXTERNAL 3
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL1SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_VSYNC0 1
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_VSYNC1 2
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL2SYNCHMOD 0x00000648
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_OUTPUT 0
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_AUTO 1
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_EXTERNAL 3
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL2SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_VSYNC0 1
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_VSYNC1 2
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL3SYNCHMOD 0x00000668
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_OUTPUT 0
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_AUTO 1
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_EXTERNAL 3
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL3SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_VSYNC0 1
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_VSYNC1 2
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL0SYNCHSW 0x0000060C
+#define MCDE_CHNL0SYNCHSW_GROUPOFFSET 0x20
+#define MCDE_CHNL0SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL0SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL0SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL1SYNCHSW 0x0000062C
+#define MCDE_CHNL1SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL1SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL1SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL2SYNCHSW 0x0000064C
+#define MCDE_CHNL2SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL2SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL2SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL3SYNCHSW 0x0000066C
+#define MCDE_CHNL3SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL3SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL3SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL0BCKGNDCOL 0x00000610
+#define MCDE_CHNL0BCKGNDCOL_GROUPOFFSET 0x20
+#define MCDE_CHNL0BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL0BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL0BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, B, __x)
+#define MCDE_CHNL0BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL0BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL0BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, G, __x)
+#define MCDE_CHNL0BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL0BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL0BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, R, __x)
+#define MCDE_CHNL1BCKGNDCOL 0x00000630
+#define MCDE_CHNL1BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL1BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL1BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, B, __x)
+#define MCDE_CHNL1BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL1BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL1BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, G, __x)
+#define MCDE_CHNL1BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL1BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL1BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, R, __x)
+#define MCDE_CHNL2BCKGNDCOL 0x00000650
+#define MCDE_CHNL2BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL2BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL2BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, B, __x)
+#define MCDE_CHNL2BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL2BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL2BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, G, __x)
+#define MCDE_CHNL2BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL2BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL2BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, R, __x)
+#define MCDE_CHNL3BCKGNDCOL 0x00000670
+#define MCDE_CHNL3BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL3BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL3BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, B, __x)
+#define MCDE_CHNL3BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL3BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL3BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, G, __x)
+#define MCDE_CHNL3BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL3BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL3BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, R, __x)
+#define MCDE_CHNL0PRIO_V1 0x00000614
+#define MCDE_CHNL0PRIO_V1_GROUPOFFSET 0x20
+#define MCDE_CHNL0PRIO_V1_CHNLPRIO_SHIFT 0
+#define MCDE_CHNL0PRIO_V1_CHNLPRIO_MASK 0x0000000F
+#define MCDE_CHNL0PRIO_V1_CHNLPRIO(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0PRIO_V1, CHNLPRIO, __x)
+#define MCDE_CHNL1PRIO_V1 0x00000634
+#define MCDE_CHNL1PRIO_V1_CHNLPRIO_SHIFT 0
+#define MCDE_CHNL1PRIO_V1_CHNLPRIO_MASK 0x0000000F
+#define MCDE_CHNL1PRIO_V1_CHNLPRIO(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1PRIO_V1, CHNLPRIO, __x)
+#define MCDE_CHNL2PRIO_V1 0x00000654
+#define MCDE_CHNL2PRIO_V1_CHNLPRIO_SHIFT 0
+#define MCDE_CHNL2PRIO_V1_CHNLPRIO_MASK 0x0000000F
+#define MCDE_CHNL2PRIO_V1_CHNLPRIO(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2PRIO_V1, CHNLPRIO, __x)
+#define MCDE_CHNL3PRIO_V1 0x00000674
+#define MCDE_CHNL3PRIO_V1_CHNLPRIO_SHIFT 0
+#define MCDE_CHNL3PRIO_V1_CHNLPRIO_MASK 0x0000000F
+#define MCDE_CHNL3PRIO_V1_CHNLPRIO(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3PRIO_V1, CHNLPRIO, __x)
+#define MCDE_CHNL0MUXING_V2 0x00000614
+#define MCDE_CHNL0MUXING_V2_GROUPOFFSET 0x20
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_SHIFT 0
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL0MUXING_V2_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0MUXING_V2, FIFO_ID, \
+ MCDE_CHNL0MUXING_V2_FIFO_ID_##__x)
+#define MCDE_CHNL0MUXING_V2_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0MUXING_V2, FIFO_ID, __x)
+#define MCDE_CHNL1MUXING_V2 0x00000634
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_SHIFT 0
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL1MUXING_V2_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1MUXING_V2, FIFO_ID, \
+ MCDE_CHNL1MUXING_V2_FIFO_ID_##__x)
+#define MCDE_CHNL1MUXING_V2_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1MUXING_V2, FIFO_ID, __x)
+#define MCDE_CHNL2MUXING_V2 0x00000654
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_SHIFT 0
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL2MUXING_V2_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2MUXING_V2, FIFO_ID, \
+ MCDE_CHNL2MUXING_V2_FIFO_ID_##__x)
+#define MCDE_CHNL2MUXING_V2_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2MUXING_V2, FIFO_ID, __x)
+#define MCDE_CHNL3MUXING_V2 0x00000674
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_SHIFT 0
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL3MUXING_V2_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3MUXING_V2, FIFO_ID, \
+ MCDE_CHNL3MUXING_V2_FIFO_ID_##__x)
+#define MCDE_CHNL3MUXING_V2_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3MUXING_V2, FIFO_ID, __x)
+#define MCDE_CRA0 0x00000800
+#define MCDE_CRA0_GROUPOFFSET 0x200
+#define MCDE_CRA0_FLOEN_SHIFT 0
+#define MCDE_CRA0_FLOEN_MASK 0x00000001
+#define MCDE_CRA0_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOEN, __x)
+#define MCDE_CRA0_POWEREN_SHIFT 1
+#define MCDE_CRA0_POWEREN_MASK 0x00000002
+#define MCDE_CRA0_POWEREN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, POWEREN, __x)
+#define MCDE_CRA0_BLENDEN_SHIFT 2
+#define MCDE_CRA0_BLENDEN_MASK 0x00000004
+#define MCDE_CRA0_BLENDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDEN, __x)
+#define MCDE_CRA0_AFLICKEN_SHIFT 3
+#define MCDE_CRA0_AFLICKEN_MASK 0x00000008
+#define MCDE_CRA0_AFLICKEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, AFLICKEN, __x)
+#define MCDE_CRA0_PALEN_SHIFT 4
+#define MCDE_CRA0_PALEN_MASK 0x00000010
+#define MCDE_CRA0_PALEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, PALEN, __x)
+#define MCDE_CRA0_DITHEN_SHIFT 5
+#define MCDE_CRA0_DITHEN_MASK 0x00000020
+#define MCDE_CRA0_DITHEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, DITHEN, __x)
+#define MCDE_CRA0_GAMEN_SHIFT 6
+#define MCDE_CRA0_GAMEN_MASK 0x00000040
+#define MCDE_CRA0_GAMEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, GAMEN, __x)
+#define MCDE_CRA0_KEYCTRL_SHIFT 7
+#define MCDE_CRA0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRA0_KEYCTRL_OFF 0
+#define MCDE_CRA0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRA0_KEYCTRL_RGB 2
+#define MCDE_CRA0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRA0_KEYCTRL_FRGB 5
+#define MCDE_CRA0_KEYCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, MCDE_CRA0_KEYCTRL_##__x)
+#define MCDE_CRA0_KEYCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, __x)
+#define MCDE_CRA0_BLENDCTRL_SHIFT 10
+#define MCDE_CRA0_BLENDCTRL_MASK 0x00000400
+#define MCDE_CRA0_BLENDCTRL_SOURCE 0
+#define MCDE_CRA0_BLENDCTRL_CONSTANT 1
+#define MCDE_CRA0_BLENDCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, MCDE_CRA0_BLENDCTRL_##__x)
+#define MCDE_CRA0_BLENDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, __x)
+#define MCDE_CRA0_FLICKMODE_SHIFT 11
+#define MCDE_CRA0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRA0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRA0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRA0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRA0_FLICKMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, MCDE_CRA0_FLICKMODE_##__x)
+#define MCDE_CRA0_FLICKMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, __x)
+#define MCDE_CRA0_FLOCKFORMAT_SHIFT 13
+#define MCDE_CRA0_FLOCKFORMAT_MASK 0x00002000
+#define MCDE_CRA0_FLOCKFORMAT_YCBCR 0
+#define MCDE_CRA0_FLOCKFORMAT_RGB 1
+#define MCDE_CRA0_FLOCKFORMAT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, MCDE_CRA0_FLOCKFORMAT_##__x)
+#define MCDE_CRA0_FLOCKFORMAT(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, __x)
+#define MCDE_CRA0_PALMODE_SHIFT 14
+#define MCDE_CRA0_PALMODE_MASK 0x00004000
+#define MCDE_CRA0_PALMODE_PALETTE 0
+#define MCDE_CRA0_PALMODE_GAMMA 1
+#define MCDE_CRA0_PALMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, PALMODE, __x)
+#define MCDE_CRA0_OLEDEN_SHIFT 15
+#define MCDE_CRA0_OLEDEN_MASK 0x00008000
+#define MCDE_CRA0_OLEDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, OLEDEN, __x)
+#define MCDE_CRA0_ALPHABLEND_SHIFT 16
+#define MCDE_CRA0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRA0_ALPHABLEND(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ALPHABLEND, __x)
+#define MCDE_CRA0_ROTEN_SHIFT 24
+#define MCDE_CRA0_ROTEN_MASK 0x01000000
+#define MCDE_CRA0_ROTEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ROTEN, __x)
+#define MCDE_CRA0_ROTBURSTSIZE_V1_SHIFT 25
+#define MCDE_CRA0_ROTBURSTSIZE_V1_MASK 0x0E000000
+#define MCDE_CRA0_ROTBURSTSIZE_V1_1W 0
+#define MCDE_CRA0_ROTBURSTSIZE_V1_2W 1
+#define MCDE_CRA0_ROTBURSTSIZE_V1_4W 2
+#define MCDE_CRA0_ROTBURSTSIZE_V1_8W 3
+#define MCDE_CRA0_ROTBURSTSIZE_V1_16W 4
+#define MCDE_CRA0_ROTBURSTSIZE_V1_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ROTBURSTSIZE_V1, \
+ MCDE_CRA0_ROTBURSTSIZE_V1_##__x)
+#define MCDE_CRA0_ROTBURSTSIZE_V1(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ROTBURSTSIZE_V1, __x)
+#define MCDE_CRA0_ROTBURSTSIZE_HW_V1_SHIFT 28
+#define MCDE_CRA0_ROTBURSTSIZE_HW_V1_MASK 0x10000000
+#define MCDE_CRA0_ROTBURSTSIZE_HW_V1(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ROTBURSTSIZE_HW_V1, __x)
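+/*
+ * Usage sketch (illustrative only, not part of the generated register list):
+ * the field macros above are assumed to expand through MCDE_VAL2REG(),
+ * defined earlier in this header, which shifts the value into the field and
+ * applies the field mask.  A channel control word could then be composed and
+ * written as, for example:
+ *
+ *	u32 cr = MCDE_CRA0_FLOEN(true) | MCDE_CRA0_POWEREN(true) |
+ *		 MCDE_CRA0_BLENDCTRL_ENUM(CONSTANT) |
+ *		 MCDE_CRA0_ALPHABLEND(0xff);
+ *	writel(cr, mcde_io_base + MCDE_CRA0 + idx * MCDE_CRA0_GROUPOFFSET);
+ *
+ * "mcde_io_base" and "idx" are hypothetical names for the remapped register
+ * base and the channel index (GROUPOFFSET 0x200 steps from the A to the B
+ * instance); writel() is the standard kernel MMIO accessor.
+ */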
+#define MCDE_CRB0 0x00000A00
+#define MCDE_CRB0_FLOEN_SHIFT 0
+#define MCDE_CRB0_FLOEN_MASK 0x00000001
+#define MCDE_CRB0_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOEN, __x)
+#define MCDE_CRB0_POWEREN_SHIFT 1
+#define MCDE_CRB0_POWEREN_MASK 0x00000002
+#define MCDE_CRB0_POWEREN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, POWEREN, __x)
+#define MCDE_CRB0_BLENDEN_SHIFT 2
+#define MCDE_CRB0_BLENDEN_MASK 0x00000004
+#define MCDE_CRB0_BLENDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDEN, __x)
+#define MCDE_CRB0_AFLICKEN_SHIFT 3
+#define MCDE_CRB0_AFLICKEN_MASK 0x00000008
+#define MCDE_CRB0_AFLICKEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, AFLICKEN, __x)
+#define MCDE_CRB0_PALEN_SHIFT 4
+#define MCDE_CRB0_PALEN_MASK 0x00000010
+#define MCDE_CRB0_PALEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, PALEN, __x)
+#define MCDE_CRB0_DITHEN_SHIFT 5
+#define MCDE_CRB0_DITHEN_MASK 0x00000020
+#define MCDE_CRB0_DITHEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, DITHEN, __x)
+#define MCDE_CRB0_GAMEN_SHIFT 6
+#define MCDE_CRB0_GAMEN_MASK 0x00000040
+#define MCDE_CRB0_GAMEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, GAMEN, __x)
+#define MCDE_CRB0_KEYCTRL_SHIFT 7
+#define MCDE_CRB0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRB0_KEYCTRL_OFF 0
+#define MCDE_CRB0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRB0_KEYCTRL_RGB 2
+#define MCDE_CRB0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRB0_KEYCTRL_FRGB 5
+#define MCDE_CRB0_KEYCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, MCDE_CRB0_KEYCTRL_##__x)
+#define MCDE_CRB0_KEYCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, __x)
+#define MCDE_CRB0_BLENDCTRL_SHIFT 10
+#define MCDE_CRB0_BLENDCTRL_MASK 0x00000400
+#define MCDE_CRB0_BLENDCTRL_SOURCE 0
+#define MCDE_CRB0_BLENDCTRL_CONSTANT 1
+#define MCDE_CRB0_BLENDCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, MCDE_CRB0_BLENDCTRL_##__x)
+#define MCDE_CRB0_BLENDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, __x)
+#define MCDE_CRB0_FLICKMODE_SHIFT 11
+#define MCDE_CRB0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRB0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRB0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRB0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRB0_FLICKMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, MCDE_CRB0_FLICKMODE_##__x)
+#define MCDE_CRB0_FLICKMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, __x)
+#define MCDE_CRB0_FLOCKFORMAT_SHIFT 13
+#define MCDE_CRB0_FLOCKFORMAT_MASK 0x00002000
+#define MCDE_CRB0_FLOCKFORMAT_YCBCR 0
+#define MCDE_CRB0_FLOCKFORMAT_RGB 1
+#define MCDE_CRB0_FLOCKFORMAT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, MCDE_CRB0_FLOCKFORMAT_##__x)
+#define MCDE_CRB0_FLOCKFORMAT(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, __x)
+#define MCDE_CRB0_PALMODE_SHIFT 14
+#define MCDE_CRB0_PALMODE_MASK 0x00004000
+#define MCDE_CRB0_PALMODE_PALETTE 0
+#define MCDE_CRB0_PALMODE_GAMMA 1
+#define MCDE_CRB0_PALMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, PALMODE, __x)
+#define MCDE_CRB0_OLEDEN_SHIFT 15
+#define MCDE_CRB0_OLEDEN_MASK 0x00008000
+#define MCDE_CRB0_OLEDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, OLEDEN, __x)
+#define MCDE_CRB0_ALPHABLEND_SHIFT 16
+#define MCDE_CRB0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRB0_ALPHABLEND(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ALPHABLEND, __x)
+#define MCDE_CRB0_ROTEN_SHIFT 24
+#define MCDE_CRB0_ROTEN_MASK 0x01000000
+#define MCDE_CRB0_ROTEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ROTEN, __x)
+#define MCDE_CRB0_ROTBURSTSIZE_V1_SHIFT 25
+#define MCDE_CRB0_ROTBURSTSIZE_V1_MASK 0x0E000000
+#define MCDE_CRB0_ROTBURSTSIZE_V1_1W 0
+#define MCDE_CRB0_ROTBURSTSIZE_V1_2W 1
+#define MCDE_CRB0_ROTBURSTSIZE_V1_4W 2
+#define MCDE_CRB0_ROTBURSTSIZE_V1_8W 3
+#define MCDE_CRB0_ROTBURSTSIZE_V1_16W 4
+#define MCDE_CRB0_ROTBURSTSIZE_V1_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ROTBURSTSIZE_V1, \
+ MCDE_CRB0_ROTBURSTSIZE_V1_##__x)
+#define MCDE_CRB0_ROTBURSTSIZE_V1(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ROTBURSTSIZE_V1, __x)
+#define MCDE_CRB0_ROTBURSTSIZE_HW_V1_SHIFT 28
+#define MCDE_CRB0_ROTBURSTSIZE_HW_V1_MASK 0x10000000
+#define MCDE_CRB0_ROTBURSTSIZE_HW_V1(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ROTBURSTSIZE_HW_V1, __x)
+#define MCDE_CRA1 0x00000804
+#define MCDE_CRA1_GROUPOFFSET 0x200
+#define MCDE_CRA1_PCD_SHIFT 0
+#define MCDE_CRA1_PCD_MASK 0x000003FF
+#define MCDE_CRA1_PCD(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, PCD, __x)
+#define MCDE_CRA1_CLKSEL_SHIFT 10
+#define MCDE_CRA1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRA1_CLKSEL_LCD 0
+#define MCDE_CRA1_CLKSEL_HDMI 1
+#define MCDE_CRA1_CLKSEL_TV 2
+#define MCDE_CRA1_CLKSEL_EXT_TV1 3
+#define MCDE_CRA1_CLKSEL_EXT_TV2 4
+#define MCDE_CRA1_CLKSEL_166MHZ 5
+#define MCDE_CRA1_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKSEL, MCDE_CRA1_CLKSEL_##__x)
+#define MCDE_CRA1_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKSEL, __x)
+#define MCDE_CRA1_CDWIN_SHIFT 13
+#define MCDE_CRA1_CDWIN_MASK 0x0001E000
+#define MCDE_CRA1_CDWIN_8BPP_C1 0
+#define MCDE_CRA1_CDWIN_12BPP_C1 1
+#define MCDE_CRA1_CDWIN_12BPP_C2 2
+#define MCDE_CRA1_CDWIN_16BPP_C1 3
+#define MCDE_CRA1_CDWIN_16BPP_C2 4
+#define MCDE_CRA1_CDWIN_16BPP_C3 5
+#define MCDE_CRA1_CDWIN_18BPP_C1 6
+#define MCDE_CRA1_CDWIN_18BPP_C2 7
+#define MCDE_CRA1_CDWIN_24BPP 8
+#define MCDE_CRA1_CDWIN_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CDWIN, MCDE_CRA1_CDWIN_##__x)
+#define MCDE_CRA1_CDWIN(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CDWIN, __x)
+#define MCDE_CRA1_OUTBPP_SHIFT 25
+#define MCDE_CRA1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRA1_OUTBPP_MONO1 0
+#define MCDE_CRA1_OUTBPP_MONO2 1
+#define MCDE_CRA1_OUTBPP_MONO4 2
+#define MCDE_CRA1_OUTBPP_MONO8 3
+#define MCDE_CRA1_OUTBPP_8BPP 4
+#define MCDE_CRA1_OUTBPP_12BPP 5
+#define MCDE_CRA1_OUTBPP_15BPP 6
+#define MCDE_CRA1_OUTBPP_16BPP 7
+#define MCDE_CRA1_OUTBPP_18BPP 8
+#define MCDE_CRA1_OUTBPP_24BPP 9
+#define MCDE_CRA1_OUTBPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, OUTBPP, MCDE_CRA1_OUTBPP_##__x)
+#define MCDE_CRA1_OUTBPP(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, OUTBPP, __x)
+#define MCDE_CRA1_BCD_SHIFT 29
+#define MCDE_CRA1_BCD_MASK 0x20000000
+#define MCDE_CRA1_BCD(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, BCD, __x)
+#define MCDE_CRA1_CLKTYPE_SHIFT 30
+#define MCDE_CRA1_CLKTYPE_MASK 0x40000000
+#define MCDE_CRA1_CLKTYPE_EXTERNAL 0
+#define MCDE_CRA1_CLKTYPE_INTERNAL 1
+#define MCDE_CRA1_CLKTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, MCDE_CRA1_CLKTYPE_##__x)
+#define MCDE_CRA1_CLKTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, __x)
+#define MCDE_CRA1_TEFFECTEN_V1_SHIFT 31
+#define MCDE_CRA1_TEFFECTEN_V1_MASK 0x80000000
+#define MCDE_CRA1_TEFFECTEN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, TEFFECTEN_V1, __x)
+#define MCDE_CRB1 0x00000A04
+#define MCDE_CRB1_PCD_SHIFT 0
+#define MCDE_CRB1_PCD_MASK 0x000003FF
+#define MCDE_CRB1_PCD(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, PCD, __x)
+#define MCDE_CRB1_CLKSEL_SHIFT 10
+#define MCDE_CRB1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRB1_CLKSEL_LCD 0
+#define MCDE_CRB1_CLKSEL_HDMI 1
+#define MCDE_CRB1_CLKSEL_TV 2
+#define MCDE_CRB1_CLKSEL_EXT_TV1 3
+#define MCDE_CRB1_CLKSEL_EXT_TV2 4
+#define MCDE_CRB1_CLKSEL_166MHZ 5
+#define MCDE_CRB1_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKSEL, MCDE_CRB1_CLKSEL_##__x)
+#define MCDE_CRB1_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKSEL, __x)
+#define MCDE_CRB1_CDWIN_SHIFT 13
+#define MCDE_CRB1_CDWIN_MASK 0x0001E000
+#define MCDE_CRB1_CDWIN_8BPP_C1 0
+#define MCDE_CRB1_CDWIN_12BPP_C1 1
+#define MCDE_CRB1_CDWIN_12BPP_C2 2
+#define MCDE_CRB1_CDWIN_16BPP_C1 3
+#define MCDE_CRB1_CDWIN_16BPP_C2 4
+#define MCDE_CRB1_CDWIN_16BPP_C3 5
+#define MCDE_CRB1_CDWIN_18BPP_C1 6
+#define MCDE_CRB1_CDWIN_18BPP_C2 7
+#define MCDE_CRB1_CDWIN_24BPP 8
+#define MCDE_CRB1_CDWIN_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CDWIN, MCDE_CRB1_CDWIN_##__x)
+#define MCDE_CRB1_CDWIN(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CDWIN, __x)
+#define MCDE_CRB1_OUTBPP_SHIFT 25
+#define MCDE_CRB1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRB1_OUTBPP_MONO1 0
+#define MCDE_CRB1_OUTBPP_MONO2 1
+#define MCDE_CRB1_OUTBPP_MONO4 2
+#define MCDE_CRB1_OUTBPP_MONO8 3
+#define MCDE_CRB1_OUTBPP_8BPP 4
+#define MCDE_CRB1_OUTBPP_12BPP 5
+#define MCDE_CRB1_OUTBPP_15BPP 6
+#define MCDE_CRB1_OUTBPP_16BPP 7
+#define MCDE_CRB1_OUTBPP_18BPP 8
+#define MCDE_CRB1_OUTBPP_24BPP 9
+#define MCDE_CRB1_OUTBPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, OUTBPP, MCDE_CRB1_OUTBPP_##__x)
+#define MCDE_CRB1_OUTBPP(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, OUTBPP, __x)
+#define MCDE_CRB1_BCD_SHIFT 29
+#define MCDE_CRB1_BCD_MASK 0x20000000
+#define MCDE_CRB1_BCD(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, BCD, __x)
+#define MCDE_CRB1_CLKTYPE_SHIFT 30
+#define MCDE_CRB1_CLKTYPE_MASK 0x40000000
+#define MCDE_CRB1_CLKTYPE_EXTERNAL 0
+#define MCDE_CRB1_CLKTYPE_INTERNAL 1
+#define MCDE_CRB1_CLKTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, MCDE_CRB1_CLKTYPE_##__x)
+#define MCDE_CRB1_CLKTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, __x)
+#define MCDE_CRB1_TEFFECTEN_V1_SHIFT 31
+#define MCDE_CRB1_TEFFECTEN_V1_MASK 0x80000000
+#define MCDE_CRB1_TEFFECTEN_V1(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, TEFFECTEN_V1, __x)
+#define MCDE_COLKEYA 0x00000808
+#define MCDE_COLKEYA_GROUPOFFSET 0x200
+#define MCDE_COLKEYA_KEYB_SHIFT 0
+#define MCDE_COLKEYA_KEYB_MASK 0x000000FF
+#define MCDE_COLKEYA_KEYB(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYB, __x)
+#define MCDE_COLKEYA_KEYG_SHIFT 8
+#define MCDE_COLKEYA_KEYG_MASK 0x0000FF00
+#define MCDE_COLKEYA_KEYG(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYG, __x)
+#define MCDE_COLKEYA_KEYR_SHIFT 16
+#define MCDE_COLKEYA_KEYR_MASK 0x00FF0000
+#define MCDE_COLKEYA_KEYR(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYR, __x)
+#define MCDE_COLKEYA_KEYA_SHIFT 24
+#define MCDE_COLKEYA_KEYA_MASK 0xFF000000
+#define MCDE_COLKEYA_KEYA(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYA, __x)
+#define MCDE_COLKEYB 0x00000A08
+#define MCDE_COLKEYB_KEYB_SHIFT 0
+#define MCDE_COLKEYB_KEYB_MASK 0x000000FF
+#define MCDE_COLKEYB_KEYB(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYB, __x)
+#define MCDE_COLKEYB_KEYG_SHIFT 8
+#define MCDE_COLKEYB_KEYG_MASK 0x0000FF00
+#define MCDE_COLKEYB_KEYG(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYG, __x)
+#define MCDE_COLKEYB_KEYR_SHIFT 16
+#define MCDE_COLKEYB_KEYR_MASK 0x00FF0000
+#define MCDE_COLKEYB_KEYR(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYR, __x)
+#define MCDE_COLKEYB_KEYA_SHIFT 24
+#define MCDE_COLKEYB_KEYA_MASK 0xFF000000
+#define MCDE_COLKEYB_KEYA(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYA, __x)
+#define MCDE_FCOLKEYA 0x0000080C
+#define MCDE_FCOLKEYA_GROUPOFFSET 0x200
+#define MCDE_FCOLKEYA_FKEYB_SHIFT 0
+#define MCDE_FCOLKEYA_FKEYB_MASK 0x000000FF
+#define MCDE_FCOLKEYA_FKEYB(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYB, __x)
+#define MCDE_FCOLKEYA_FKEYG_SHIFT 8
+#define MCDE_FCOLKEYA_FKEYG_MASK 0x0000FF00
+#define MCDE_FCOLKEYA_FKEYG(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYG, __x)
+#define MCDE_FCOLKEYA_FKEYR_SHIFT 16
+#define MCDE_FCOLKEYA_FKEYR_MASK 0x00FF0000
+#define MCDE_FCOLKEYA_FKEYR(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYR, __x)
+#define MCDE_FCOLKEYA_FKEYA_SHIFT 24
+#define MCDE_FCOLKEYA_FKEYA_MASK 0xFF000000
+#define MCDE_FCOLKEYA_FKEYA(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYA, __x)
+#define MCDE_FCOLKEYB 0x00000A0C
+#define MCDE_FCOLKEYB_FKEYB_SHIFT 0
+#define MCDE_FCOLKEYB_FKEYB_MASK 0x000000FF
+#define MCDE_FCOLKEYB_FKEYB(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYB, __x)
+#define MCDE_FCOLKEYB_FKEYG_SHIFT 8
+#define MCDE_FCOLKEYB_FKEYG_MASK 0x0000FF00
+#define MCDE_FCOLKEYB_FKEYG(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYG, __x)
+#define MCDE_FCOLKEYB_FKEYR_SHIFT 16
+#define MCDE_FCOLKEYB_FKEYR_MASK 0x00FF0000
+#define MCDE_FCOLKEYB_FKEYR(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYR, __x)
+#define MCDE_FCOLKEYB_FKEYA_SHIFT 24
+#define MCDE_FCOLKEYB_FKEYA_MASK 0xFF000000
+#define MCDE_FCOLKEYB_FKEYA(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYA, __x)
+#define MCDE_RGBCONV1A 0x00000810
+#define MCDE_RGBCONV1A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV1A_YR_GREEN_SHIFT 0
+#define MCDE_RGBCONV1A_YR_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV1A_YR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1A, YR_GREEN, __x)
+#define MCDE_RGBCONV1A_YR_RED_SHIFT 16
+#define MCDE_RGBCONV1A_YR_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV1A_YR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1A, YR_RED, __x)
+#define MCDE_RGBCONV1B 0x00000A10
+#define MCDE_RGBCONV1B_YR_GREEN_SHIFT 0
+#define MCDE_RGBCONV1B_YR_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV1B_YR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1B, YR_GREEN, __x)
+#define MCDE_RGBCONV1B_YR_RED_SHIFT 16
+#define MCDE_RGBCONV1B_YR_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV1B_YR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1B, YR_RED, __x)
+#define MCDE_RGBCONV2A 0x00000814
+#define MCDE_RGBCONV2A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV2A_CR_RED_SHIFT 0
+#define MCDE_RGBCONV2A_CR_RED_MASK 0x000007FF
+#define MCDE_RGBCONV2A_CR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2A, CR_RED, __x)
+#define MCDE_RGBCONV2A_YR_BLUE_SHIFT 16
+#define MCDE_RGBCONV2A_YR_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV2A_YR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2A, YR_BLUE, __x)
+#define MCDE_RGBCONV2B 0x00000A14
+#define MCDE_RGBCONV2B_CR_RED_SHIFT 0
+#define MCDE_RGBCONV2B_CR_RED_MASK 0x000007FF
+#define MCDE_RGBCONV2B_CR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2B, CR_RED, __x)
+#define MCDE_RGBCONV2B_YR_BLUE_SHIFT 16
+#define MCDE_RGBCONV2B_YR_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV2B_YR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2B, YR_BLUE, __x)
+#define MCDE_RGBCONV3A 0x00000818
+#define MCDE_RGBCONV3A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV3A_CR_BLUE_SHIFT 0
+#define MCDE_RGBCONV3A_CR_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV3A_CR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3A, CR_BLUE, __x)
+#define MCDE_RGBCONV3A_CR_GREEN_SHIFT 16
+#define MCDE_RGBCONV3A_CR_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV3A_CR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3A, CR_GREEN, __x)
+#define MCDE_RGBCONV3B 0x00000A18
+#define MCDE_RGBCONV3B_CR_BLUE_SHIFT 0
+#define MCDE_RGBCONV3B_CR_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV3B_CR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3B, CR_BLUE, __x)
+#define MCDE_RGBCONV3B_CR_GREEN_SHIFT 16
+#define MCDE_RGBCONV3B_CR_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV3B_CR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3B, CR_GREEN, __x)
+#define MCDE_RGBCONV4A 0x0000081C
+#define MCDE_RGBCONV4A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV4A_CB_GREEN_SHIFT 0
+#define MCDE_RGBCONV4A_CB_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV4A_CB_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4A, CB_GREEN, __x)
+#define MCDE_RGBCONV4A_CB_RED_SHIFT 16
+#define MCDE_RGBCONV4A_CB_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV4A_CB_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4A, CB_RED, __x)
+#define MCDE_RGBCONV4B 0x00000A1C
+#define MCDE_RGBCONV4B_CB_GREEN_SHIFT 0
+#define MCDE_RGBCONV4B_CB_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV4B_CB_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4B, CB_GREEN, __x)
+#define MCDE_RGBCONV4B_CB_RED_SHIFT 16
+#define MCDE_RGBCONV4B_CB_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV4B_CB_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4B, CB_RED, __x)
+#define MCDE_RGBCONV5A 0x00000820
+#define MCDE_RGBCONV5A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV5A_OFF_RED_SHIFT 0
+#define MCDE_RGBCONV5A_OFF_RED_MASK 0x000007FF
+#define MCDE_RGBCONV5A_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5A, OFF_RED, __x)
+#define MCDE_RGBCONV5A_CB_BLUE_SHIFT 16
+#define MCDE_RGBCONV5A_CB_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV5A_CB_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5A, CB_BLUE, __x)
+#define MCDE_RGBCONV5B 0x00000A20
+#define MCDE_RGBCONV5B_OFF_RED_SHIFT 0
+#define MCDE_RGBCONV5B_OFF_RED_MASK 0x000007FF
+#define MCDE_RGBCONV5B_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5B, OFF_RED, __x)
+#define MCDE_RGBCONV5B_CB_BLUE_SHIFT 16
+#define MCDE_RGBCONV5B_CB_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV5B_CB_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5B, CB_BLUE, __x)
+#define MCDE_RGBCONV6A 0x00000824
+#define MCDE_RGBCONV6A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV6A_OFF_BLUE_SHIFT 0
+#define MCDE_RGBCONV6A_OFF_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV6A_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_BLUE, __x)
+#define MCDE_RGBCONV6A_OFF_GREEN_SHIFT 16
+#define MCDE_RGBCONV6A_OFF_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV6A_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_GREEN, __x)
+#define MCDE_RGBCONV6B 0x00000A24
+#define MCDE_RGBCONV6B_OFF_BLUE_SHIFT 0
+#define MCDE_RGBCONV6B_OFF_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV6B_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_BLUE, __x)
+#define MCDE_RGBCONV6B_OFF_GREEN_SHIFT 16
+#define MCDE_RGBCONV6B_OFF_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV6B_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_GREEN, __x)
+#define MCDE_FFCOEF0 0x00000828
+#define MCDE_FFCOEF0_COEFF0_N1_SHIFT 0
+#define MCDE_FFCOEF0_COEFF0_N1_MASK 0x000000FF
+#define MCDE_FFCOEF0_COEFF0_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N1, __x)
+#define MCDE_FFCOEF0_COEFF0_N2_SHIFT 8
+#define MCDE_FFCOEF0_COEFF0_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF0_COEFF0_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N2, __x)
+#define MCDE_FFCOEF0_COEFF0_N3_SHIFT 16
+#define MCDE_FFCOEF0_COEFF0_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF0_COEFF0_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N3, __x)
+#define MCDE_FFCOEF0_T0_SHIFT 24
+#define MCDE_FFCOEF0_T0_MASK 0x0F000000
+#define MCDE_FFCOEF0_T0(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, T0, __x)
+#define MCDE_FFCOEF1 0x0000082C
+#define MCDE_FFCOEF1_COEFF1_N1_SHIFT 0
+#define MCDE_FFCOEF1_COEFF1_N1_MASK 0x000000FF
+#define MCDE_FFCOEF1_COEFF1_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N1, __x)
+#define MCDE_FFCOEF1_COEFF1_N2_SHIFT 8
+#define MCDE_FFCOEF1_COEFF1_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF1_COEFF1_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N2, __x)
+#define MCDE_FFCOEF1_COEFF1_N3_SHIFT 16
+#define MCDE_FFCOEF1_COEFF1_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF1_COEFF1_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N3, __x)
+#define MCDE_FFCOEF1_T1_SHIFT 24
+#define MCDE_FFCOEF1_T1_MASK 0x0F000000
+#define MCDE_FFCOEF1_T1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, T1, __x)
+#define MCDE_FFCOEF2 0x00000830
+#define MCDE_FFCOEF2_COEFF2_N1_SHIFT 0
+#define MCDE_FFCOEF2_COEFF2_N1_MASK 0x000000FF
+#define MCDE_FFCOEF2_COEFF2_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N1, __x)
+#define MCDE_FFCOEF2_COEFF2_N2_SHIFT 8
+#define MCDE_FFCOEF2_COEFF2_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF2_COEFF2_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N2, __x)
+#define MCDE_FFCOEF2_COEFF2_N3_SHIFT 16
+#define MCDE_FFCOEF2_COEFF2_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF2_COEFF2_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N3, __x)
+#define MCDE_FFCOEF2_T2_SHIFT 24
+#define MCDE_FFCOEF2_T2_MASK 0x0F000000
+#define MCDE_FFCOEF2_T2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, T2, __x)
+#define MCDE_MCDE_WDATAA_V2 0x00000834
+#define MCDE_MCDE_WDATAA_V2_GROUPOFFSET 0x200
+#define MCDE_MCDE_WDATAA_V2_DC_SHIFT 24
+#define MCDE_MCDE_WDATAA_V2_DC_MASK 0x01000000
+#define MCDE_MCDE_WDATAA_V2_DC(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAA_V2, DC, __x)
+#define MCDE_MCDE_WDATAA_V2_DATAVALUE_SHIFT 0
+#define MCDE_MCDE_WDATAA_V2_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_MCDE_WDATAA_V2_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAA_V2, DATAVALUE, __x)
+#define MCDE_MCDE_WDATAB_V2 0x00000A34
+#define MCDE_MCDE_WDATAB_V2_DC_SHIFT 24
+#define MCDE_MCDE_WDATAB_V2_DC_MASK 0x01000000
+#define MCDE_MCDE_WDATAB_V2_DC(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAB_V2, DC, __x)
+#define MCDE_MCDE_WDATAB_V2_DATAVALUE_SHIFT 0
+#define MCDE_MCDE_WDATAB_V2_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_MCDE_WDATAB_V2_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAB_V2, DATAVALUE, __x)
+#define MCDE_TVCRA 0x00000838
+#define MCDE_TVCRA_GROUPOFFSET 0x200
+#define MCDE_TVCRA_SEL_MOD_SHIFT 0
+#define MCDE_TVCRA_SEL_MOD_MASK 0x00000001
+#define MCDE_TVCRA_SEL_MOD_LCD 0
+#define MCDE_TVCRA_SEL_MOD_TV 1
+#define MCDE_TVCRA_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, MCDE_TVCRA_SEL_MOD_##__x)
+#define MCDE_TVCRA_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, __x)
+#define MCDE_TVCRA_INTEREN_SHIFT 1
+#define MCDE_TVCRA_INTEREN_MASK 0x00000002
+#define MCDE_TVCRA_INTEREN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, INTEREN, __x)
+#define MCDE_TVCRA_IFIELD_SHIFT 2
+#define MCDE_TVCRA_IFIELD_MASK 0x00000004
+#define MCDE_TVCRA_IFIELD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, IFIELD, __x)
+#define MCDE_TVCRA_TVMODE_SHIFT 3
+#define MCDE_TVCRA_TVMODE_MASK 0x00000038
+#define MCDE_TVCRA_TVMODE_SDTV_656P 0
+#define MCDE_TVCRA_TVMODE_HDTV_480P 1
+#define MCDE_TVCRA_TVMODE_HDTV_720P 2
+#define MCDE_TVCRA_TVMODE_SDTV_656P_LE 3
+#define MCDE_TVCRA_TVMODE_SDTV_656P_BE 4
+#define MCDE_TVCRA_TVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, TVMODE, MCDE_TVCRA_TVMODE_##__x)
+#define MCDE_TVCRA_TVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, TVMODE, __x)
+#define MCDE_TVCRA_SDTVMODE_SHIFT 6
+#define MCDE_TVCRA_SDTVMODE_MASK 0x000000C0
+#define MCDE_TVCRA_SDTVMODE_Y0CBY1CR 0
+#define MCDE_TVCRA_SDTVMODE_CBY0CRY1 1
+#define MCDE_TVCRA_SDTVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, MCDE_TVCRA_SDTVMODE_##__x)
+#define MCDE_TVCRA_SDTVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, __x)
+#define MCDE_TVCRA_AVRGEN_SHIFT 8
+#define MCDE_TVCRA_AVRGEN_MASK 0x00000100
+#define MCDE_TVCRA_AVRGEN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, AVRGEN, __x)
+#define MCDE_TVCRA_CKINV_SHIFT 9
+#define MCDE_TVCRA_CKINV_MASK 0x00000200
+#define MCDE_TVCRA_CKINV(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, CKINV, __x)
+#define MCDE_TVCRB 0x00000A38
+#define MCDE_TVCRB_SEL_MOD_SHIFT 0
+#define MCDE_TVCRB_SEL_MOD_MASK 0x00000001
+#define MCDE_TVCRB_SEL_MOD_LCD 0
+#define MCDE_TVCRB_SEL_MOD_TV 1
+#define MCDE_TVCRB_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, MCDE_TVCRB_SEL_MOD_##__x)
+#define MCDE_TVCRB_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, __x)
+#define MCDE_TVCRB_INTEREN_SHIFT 1
+#define MCDE_TVCRB_INTEREN_MASK 0x00000002
+#define MCDE_TVCRB_INTEREN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, INTEREN, __x)
+#define MCDE_TVCRB_IFIELD_SHIFT 2
+#define MCDE_TVCRB_IFIELD_MASK 0x00000004
+#define MCDE_TVCRB_IFIELD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, IFIELD, __x)
+#define MCDE_TVCRB_TVMODE_SHIFT 3
+#define MCDE_TVCRB_TVMODE_MASK 0x00000038
+#define MCDE_TVCRB_TVMODE_SDTV_656P 0
+#define MCDE_TVCRB_TVMODE_HDTV_480P 1
+#define MCDE_TVCRB_TVMODE_HDTV_720P 2
+#define MCDE_TVCRB_TVMODE_SDTV_656P_LE 3
+#define MCDE_TVCRB_TVMODE_SDTV_656P_BE 4
+#define MCDE_TVCRB_TVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, TVMODE, MCDE_TVCRB_TVMODE_##__x)
+#define MCDE_TVCRB_TVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, TVMODE, __x)
+#define MCDE_TVCRB_SDTVMODE_SHIFT 6
+#define MCDE_TVCRB_SDTVMODE_MASK 0x000000C0
+#define MCDE_TVCRB_SDTVMODE_Y0CBY1CR 0
+#define MCDE_TVCRB_SDTVMODE_CBY0CRY1 1
+#define MCDE_TVCRB_SDTVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, MCDE_TVCRB_SDTVMODE_##__x)
+#define MCDE_TVCRB_SDTVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, __x)
+#define MCDE_TVCRB_AVRGEN_SHIFT 8
+#define MCDE_TVCRB_AVRGEN_MASK 0x00000100
+#define MCDE_TVCRB_AVRGEN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, AVRGEN, __x)
+#define MCDE_TVCRB_CKINV_SHIFT 9
+#define MCDE_TVCRB_CKINV_MASK 0x00000200
+#define MCDE_TVCRB_CKINV(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, CKINV, __x)
+#define MCDE_TVBL1A 0x0000083C
+#define MCDE_TVBL1A_GROUPOFFSET 0x200
+#define MCDE_TVBL1A_BEL1_SHIFT 0
+#define MCDE_TVBL1A_BEL1_MASK 0x000007FF
+#define MCDE_TVBL1A_BEL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1A, BEL1, __x)
+#define MCDE_TVBL1A_BSL1_SHIFT 16
+#define MCDE_TVBL1A_BSL1_MASK 0x07FF0000
+#define MCDE_TVBL1A_BSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1A, BSL1, __x)
+#define MCDE_TVBL1B 0x00000A3C
+#define MCDE_TVBL1B_BEL1_SHIFT 0
+#define MCDE_TVBL1B_BEL1_MASK 0x000007FF
+#define MCDE_TVBL1B_BEL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1B, BEL1, __x)
+#define MCDE_TVBL1B_BSL1_SHIFT 16
+#define MCDE_TVBL1B_BSL1_MASK 0x07FF0000
+#define MCDE_TVBL1B_BSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1B, BSL1, __x)
+#define MCDE_TVISLA 0x00000840
+#define MCDE_TVISLA_GROUPOFFSET 0x200
+#define MCDE_TVISLA_FSL1_SHIFT 0
+#define MCDE_TVISLA_FSL1_MASK 0x000007FF
+#define MCDE_TVISLA_FSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVISLA, FSL1, __x)
+#define MCDE_TVISLA_FSL2_SHIFT 16
+#define MCDE_TVISLA_FSL2_MASK 0x07FF0000
+#define MCDE_TVISLA_FSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVISLA, FSL2, __x)
+#define MCDE_TVISLB 0x00000A40
+#define MCDE_TVISLB_FSL1_SHIFT 0
+#define MCDE_TVISLB_FSL1_MASK 0x000007FF
+#define MCDE_TVISLB_FSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVISLB, FSL1, __x)
+#define MCDE_TVISLB_FSL2_SHIFT 16
+#define MCDE_TVISLB_FSL2_MASK 0x07FF0000
+#define MCDE_TVISLB_FSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVISLB, FSL2, __x)
+#define MCDE_TVDVOA 0x00000844
+#define MCDE_TVDVOA_GROUPOFFSET 0x200
+#define MCDE_TVDVOA_DVO1_SHIFT 0
+#define MCDE_TVDVOA_DVO1_MASK 0x000007FF
+#define MCDE_TVDVOA_DVO1(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOA, DVO1, __x)
+#define MCDE_TVDVOA_DVO2_SHIFT 16
+#define MCDE_TVDVOA_DVO2_MASK 0x07FF0000
+#define MCDE_TVDVOA_DVO2(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOA, DVO2, __x)
+#define MCDE_TVDVOB 0x00000A44
+#define MCDE_TVDVOB_DVO1_SHIFT 0
+#define MCDE_TVDVOB_DVO1_MASK 0x000007FF
+#define MCDE_TVDVOB_DVO1(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOB, DVO1, __x)
+#define MCDE_TVDVOB_DVO2_SHIFT 16
+#define MCDE_TVDVOB_DVO2_MASK 0x07FF0000
+#define MCDE_TVDVOB_DVO2(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOB, DVO2, __x)
+#define MCDE_TVTIM1A 0x0000084C
+#define MCDE_TVTIM1A_GROUPOFFSET 0x200
+#define MCDE_TVTIM1A_DHO_SHIFT 0
+#define MCDE_TVTIM1A_DHO_MASK 0x000007FF
+#define MCDE_TVTIM1A_DHO(__x) \
+ MCDE_VAL2REG(MCDE_TVTIM1A, DHO, __x)
+#define MCDE_TVTIM1B 0x00000A4C
+#define MCDE_TVTIM1B_DHO_SHIFT 0
+#define MCDE_TVTIM1B_DHO_MASK 0x000007FF
+#define MCDE_TVTIM1B_DHO(__x) \
+ MCDE_VAL2REG(MCDE_TVTIM1B, DHO, __x)
+#define MCDE_TVLBALWA 0x00000850
+#define MCDE_TVLBALWA_GROUPOFFSET 0x200
+#define MCDE_TVLBALWA_LBW_SHIFT 0
+#define MCDE_TVLBALWA_LBW_MASK 0x000007FF
+#define MCDE_TVLBALWA_LBW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWA, LBW, __x)
+#define MCDE_TVLBALWA_ALW_SHIFT 16
+#define MCDE_TVLBALWA_ALW_MASK 0x07FF0000
+#define MCDE_TVLBALWA_ALW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWA, ALW, __x)
+#define MCDE_TVLBALWB 0x00000A50
+#define MCDE_TVLBALWB_LBW_SHIFT 0
+#define MCDE_TVLBALWB_LBW_MASK 0x000007FF
+#define MCDE_TVLBALWB_LBW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWB, LBW, __x)
+#define MCDE_TVLBALWB_ALW_SHIFT 16
+#define MCDE_TVLBALWB_ALW_MASK 0x07FF0000
+#define MCDE_TVLBALWB_ALW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWB, ALW, __x)
+#define MCDE_TVBL2A 0x00000854
+#define MCDE_TVBL2A_GROUPOFFSET 0x200
+#define MCDE_TVBL2A_BEL2_SHIFT 0
+#define MCDE_TVBL2A_BEL2_MASK 0x000007FF
+#define MCDE_TVBL2A_BEL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2A, BEL2, __x)
+#define MCDE_TVBL2A_BSL2_SHIFT 16
+#define MCDE_TVBL2A_BSL2_MASK 0x07FF0000
+#define MCDE_TVBL2A_BSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2A, BSL2, __x)
+#define MCDE_TVBL2B 0x00000A54
+#define MCDE_TVBL2B_BEL2_SHIFT 0
+#define MCDE_TVBL2B_BEL2_MASK 0x000007FF
+#define MCDE_TVBL2B_BEL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2B, BEL2, __x)
+#define MCDE_TVBL2B_BSL2_SHIFT 16
+#define MCDE_TVBL2B_BSL2_MASK 0x07FF0000
+#define MCDE_TVBL2B_BSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2B, BSL2, __x)
+#define MCDE_TVBLUA 0x00000858
+#define MCDE_TVBLUA_GROUPOFFSET 0x200
+#define MCDE_TVBLUA_TVBLU_SHIFT 0
+#define MCDE_TVBLUA_TVBLU_MASK 0x000000FF
+#define MCDE_TVBLUA_TVBLU(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBLU, __x)
+#define MCDE_TVBLUA_TVBCB_SHIFT 8
+#define MCDE_TVBLUA_TVBCB_MASK 0x0000FF00
+#define MCDE_TVBLUA_TVBCB(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBCB, __x)
+#define MCDE_TVBLUA_TVBCR_SHIFT 16
+#define MCDE_TVBLUA_TVBCR_MASK 0x00FF0000
+#define MCDE_TVBLUA_TVBCR(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBCR, __x)
+#define MCDE_TVBLUB 0x00000A58
+#define MCDE_TVBLUB_TVBLU_SHIFT 0
+#define MCDE_TVBLUB_TVBLU_MASK 0x000000FF
+#define MCDE_TVBLUB_TVBLU(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBLU, __x)
+#define MCDE_TVBLUB_TVBCB_SHIFT 8
+#define MCDE_TVBLUB_TVBCB_MASK 0x0000FF00
+#define MCDE_TVBLUB_TVBCB(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBCB, __x)
+#define MCDE_TVBLUB_TVBCR_SHIFT 16
+#define MCDE_TVBLUB_TVBCR_MASK 0x00FF0000
+#define MCDE_TVBLUB_TVBCR(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBCR, __x)
+#define MCDE_LCDTIM1A 0x00000860
+#define MCDE_LCDTIM1A_GROUPOFFSET 0x200
+#define MCDE_LCDTIM1A_IVP_SHIFT 19
+#define MCDE_LCDTIM1A_IVP_MASK 0x00080000
+#define MCDE_LCDTIM1A_IVP(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IVP, __x)
+#define MCDE_LCDTIM1A_IVS_SHIFT 20
+#define MCDE_LCDTIM1A_IVS_MASK 0x00100000
+#define MCDE_LCDTIM1A_IVS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IVS, __x)
+#define MCDE_LCDTIM1A_IHS_SHIFT 21
+#define MCDE_LCDTIM1A_IHS_MASK 0x00200000
+#define MCDE_LCDTIM1A_IHS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IHS, __x)
+#define MCDE_LCDTIM1A_IPC_SHIFT 22
+#define MCDE_LCDTIM1A_IPC_MASK 0x00400000
+#define MCDE_LCDTIM1A_IPC(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IPC, __x)
+#define MCDE_LCDTIM1A_IOE_SHIFT 23
+#define MCDE_LCDTIM1A_IOE_MASK 0x00800000
+#define MCDE_LCDTIM1A_IOE(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IOE, __x)
+#define MCDE_LCDTIM1B 0x00000A60
+#define MCDE_LCDTIM1B_IVP_SHIFT 19
+#define MCDE_LCDTIM1B_IVP_MASK 0x00080000
+#define MCDE_LCDTIM1B_IVP(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IVP, __x)
+#define MCDE_LCDTIM1B_IVS_SHIFT 20
+#define MCDE_LCDTIM1B_IVS_MASK 0x00100000
+#define MCDE_LCDTIM1B_IVS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IVS, __x)
+#define MCDE_LCDTIM1B_IHS_SHIFT 21
+#define MCDE_LCDTIM1B_IHS_MASK 0x00200000
+#define MCDE_LCDTIM1B_IHS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IHS, __x)
+#define MCDE_LCDTIM1B_IPC_SHIFT 22
+#define MCDE_LCDTIM1B_IPC_MASK 0x00400000
+#define MCDE_LCDTIM1B_IPC(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IPC, __x)
+#define MCDE_LCDTIM1B_IOE_SHIFT 23
+#define MCDE_LCDTIM1B_IOE_MASK 0x00800000
+#define MCDE_LCDTIM1B_IOE(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IOE, __x)
+#define MCDE_DITCTRLA 0x00000864
+#define MCDE_DITCTRLA_GROUPOFFSET 0x200
+#define MCDE_DITCTRLA_TEMP_SHIFT 0
+#define MCDE_DITCTRLA_TEMP_MASK 0x00000001
+#define MCDE_DITCTRLA_TEMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, TEMP, __x)
+#define MCDE_DITCTRLA_COMP_SHIFT 1
+#define MCDE_DITCTRLA_COMP_MASK 0x00000002
+#define MCDE_DITCTRLA_COMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, COMP, __x)
+#define MCDE_DITCTRLA_MASK_SHIFT 4
+#define MCDE_DITCTRLA_MASK_MASK 0x00000010
+#define MCDE_DITCTRLA_MASK(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, MASK, __x)
+#define MCDE_DITCTRLA_FOFFX_SHIFT 5
+#define MCDE_DITCTRLA_FOFFX_MASK 0x000003E0
+#define MCDE_DITCTRLA_FOFFX(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, FOFFX, __x)
+#define MCDE_DITCTRLA_FOFFY_SHIFT 10
+#define MCDE_DITCTRLA_FOFFY_MASK 0x00007C00
+#define MCDE_DITCTRLA_FOFFY(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, FOFFY, __x)
+#define MCDE_DITCTRLB 0x00000A64
+#define MCDE_DITCTRLB_TEMP_SHIFT 0
+#define MCDE_DITCTRLB_TEMP_MASK 0x00000001
+#define MCDE_DITCTRLB_TEMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, TEMP, __x)
+#define MCDE_DITCTRLB_COMP_SHIFT 1
+#define MCDE_DITCTRLB_COMP_MASK 0x00000002
+#define MCDE_DITCTRLB_COMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, COMP, __x)
+#define MCDE_DITCTRLB_MASK_SHIFT 4
+#define MCDE_DITCTRLB_MASK_MASK 0x00000010
+#define MCDE_DITCTRLB_MASK(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, MASK, __x)
+#define MCDE_DITCTRLB_FOFFX_SHIFT 5
+#define MCDE_DITCTRLB_FOFFX_MASK 0x000003E0
+#define MCDE_DITCTRLB_FOFFX(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, FOFFX, __x)
+#define MCDE_DITCTRLB_FOFFY_SHIFT 10
+#define MCDE_DITCTRLB_FOFFY_MASK 0x00007C00
+#define MCDE_DITCTRLB_FOFFY(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, FOFFY, __x)
+#define MCDE_DITOFFA 0x00000868
+#define MCDE_DITOFFA_GROUPOFFSET 0x200
+#define MCDE_DITOFFA_XG_SHIFT 0
+#define MCDE_DITOFFA_XG_MASK 0x0000001F
+#define MCDE_DITOFFA_XG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, XG, __x)
+#define MCDE_DITOFFA_YG_SHIFT 8
+#define MCDE_DITOFFA_YG_MASK 0x00001F00
+#define MCDE_DITOFFA_YG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, YG, __x)
+#define MCDE_DITOFFA_XB_SHIFT 16
+#define MCDE_DITOFFA_XB_MASK 0x001F0000
+#define MCDE_DITOFFA_XB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, XB, __x)
+#define MCDE_DITOFFA_YB_SHIFT 24
+#define MCDE_DITOFFA_YB_MASK 0x1F000000
+#define MCDE_DITOFFA_YB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, YB, __x)
+#define MCDE_DITOFFB 0x00000A68
+#define MCDE_DITOFFB_XG_SHIFT 0
+#define MCDE_DITOFFB_XG_MASK 0x0000001F
+#define MCDE_DITOFFB_XG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, XG, __x)
+#define MCDE_DITOFFB_YG_SHIFT 8
+#define MCDE_DITOFFB_YG_MASK 0x00001F00
+#define MCDE_DITOFFB_YG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, YG, __x)
+#define MCDE_DITOFFB_XB_SHIFT 16
+#define MCDE_DITOFFB_XB_MASK 0x001F0000
+#define MCDE_DITOFFB_XB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, XB, __x)
+#define MCDE_DITOFFB_YB_SHIFT 24
+#define MCDE_DITOFFB_YB_MASK 0x1F000000
+#define MCDE_DITOFFB_YB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, YB, __x)
+#define MCDE_PAL0A 0x0000086C
+#define MCDE_PAL0A_GROUPOFFSET 0x200
+#define MCDE_PAL0A_BLUE_SHIFT 0
+#define MCDE_PAL0A_BLUE_MASK 0x00000FFF
+#define MCDE_PAL0A_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_PAL0A, BLUE, __x)
+#define MCDE_PAL0A_GREEN_SHIFT 16
+#define MCDE_PAL0A_GREEN_MASK 0x0FFF0000
+#define MCDE_PAL0A_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_PAL0A, GREEN, __x)
+#define MCDE_PAL0B 0x00000A6C
+#define MCDE_PAL0B_BLUE_SHIFT 0
+#define MCDE_PAL0B_BLUE_MASK 0x00000FFF
+#define MCDE_PAL0B_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_PAL0B, BLUE, __x)
+#define MCDE_PAL0B_GREEN_SHIFT 16
+#define MCDE_PAL0B_GREEN_MASK 0x0FFF0000
+#define MCDE_PAL0B_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_PAL0B, GREEN, __x)
+#define MCDE_PAL1A 0x00000870
+#define MCDE_PAL1A_GROUPOFFSET 0x200
+#define MCDE_PAL1A_RED_SHIFT 0
+#define MCDE_PAL1A_RED_MASK 0x00000FFF
+#define MCDE_PAL1A_RED(__x) \
+ MCDE_VAL2REG(MCDE_PAL1A, RED, __x)
+#define MCDE_PAL1B 0x00000A70
+#define MCDE_PAL1B_RED_SHIFT 0
+#define MCDE_PAL1B_RED_MASK 0x00000FFF
+#define MCDE_PAL1B_RED(__x) \
+ MCDE_VAL2REG(MCDE_PAL1B, RED, __x)
+#define MCDE_ROTADD0A 0x00000874
+#define MCDE_ROTADD0A_GROUPOFFSET 0x200
+#define MCDE_ROTADD0A_ROTADD0_SHIFT 3
+#define MCDE_ROTADD0A_ROTADD0_MASK 0xFFFFFFF8
+#define MCDE_ROTADD0A_ROTADD0(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD0A, ROTADD0, __x)
+#define MCDE_ROTADD0B 0x00000A74
+#define MCDE_ROTADD0B_ROTADD0_SHIFT 3
+#define MCDE_ROTADD0B_ROTADD0_MASK 0xFFFFFFF8
+#define MCDE_ROTADD0B_ROTADD0(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD0B, ROTADD0, __x)
+#define MCDE_ROTADD1A 0x00000878
+#define MCDE_ROTADD1A_GROUPOFFSET 0x200
+#define MCDE_ROTADD1A_ROTADD1_SHIFT 3
+#define MCDE_ROTADD1A_ROTADD1_MASK 0xFFFFFFF8
+#define MCDE_ROTADD1A_ROTADD1(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD1A, ROTADD1, __x)
+#define MCDE_ROTADD1B 0x00000A78
+#define MCDE_ROTADD1B_ROTADD1_SHIFT 3
+#define MCDE_ROTADD1B_ROTADD1_MASK 0xFFFFFFF8
+#define MCDE_ROTADD1B_ROTADD1(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD1B, ROTADD1, __x)
+#define MCDE_ROTACONF 0x0000087C
+#define MCDE_ROTACONF_GROUPOFFSET 0x200
+#define MCDE_ROTACONF_ROTBURSTSIZE_SHIFT 0
+#define MCDE_ROTACONF_ROTBURSTSIZE_MASK 0x00000003
+#define MCDE_ROTACONF_ROTBURSTSIZE_1W 0
+#define MCDE_ROTACONF_ROTBURSTSIZE_2W 1
+#define MCDE_ROTACONF_ROTBURSTSIZE_4W 2
+#define MCDE_ROTACONF_ROTBURSTSIZE_8W 3
+#define MCDE_ROTACONF_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, \
+ MCDE_ROTACONF_ROTBURSTSIZE_##__x)
+#define MCDE_ROTACONF_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, __x)
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_SHIFT 2
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_MASK 0x00000004
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE_HW, __x)
+#define MCDE_ROTACONF_ROTDIR_SHIFT 3
+#define MCDE_ROTACONF_ROTDIR_MASK 0x00000008
+#define MCDE_ROTACONF_ROTDIR_CCW 0
+#define MCDE_ROTACONF_ROTDIR_CW 1
+#define MCDE_ROTACONF_ROTDIR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, MCDE_ROTACONF_ROTDIR_##__x)
+#define MCDE_ROTACONF_ROTDIR(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, __x)
+#define MCDE_ROTACONF_WR_MAXOUT_SHIFT 4
+#define MCDE_ROTACONF_WR_MAXOUT_MASK 0x00000030
+#define MCDE_ROTACONF_WR_MAXOUT_1_REQ 0
+#define MCDE_ROTACONF_WR_MAXOUT_2_REQ 1
+#define MCDE_ROTACONF_WR_MAXOUT_4_REQ 2
+#define MCDE_ROTACONF_WR_MAXOUT_8_REQ 3
+#define MCDE_ROTACONF_WR_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, MCDE_ROTACONF_WR_MAXOUT_##__x)
+#define MCDE_ROTACONF_WR_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, __x)
+#define MCDE_ROTACONF_RD_MAXOUT_SHIFT 6
+#define MCDE_ROTACONF_RD_MAXOUT_MASK 0x000000C0
+#define MCDE_ROTACONF_RD_MAXOUT_1_REQ 0
+#define MCDE_ROTACONF_RD_MAXOUT_2_REQ 1
+#define MCDE_ROTACONF_RD_MAXOUT_4_REQ 2
+#define MCDE_ROTACONF_RD_MAXOUT_8_REQ 3
+#define MCDE_ROTACONF_RD_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, MCDE_ROTACONF_RD_MAXOUT_##__x)
+#define MCDE_ROTACONF_RD_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, __x)
+#define MCDE_ROTACONF_STRIP_WIDTH_SHIFT 8
+#define MCDE_ROTACONF_STRIP_WIDTH_MASK 0x00007F00
+#define MCDE_ROTACONF_STRIP_WIDTH_2PIX 0
+#define MCDE_ROTACONF_STRIP_WIDTH_4PIX 1
+#define MCDE_ROTACONF_STRIP_WIDTH_8PIX 2
+#define MCDE_ROTACONF_STRIP_WIDTH_16PIX 3
+#define MCDE_ROTACONF_STRIP_WIDTH_32PIX 4
+#define MCDE_ROTACONF_STRIP_WIDTH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, \
+ MCDE_ROTACONF_STRIP_WIDTH_##__x)
+#define MCDE_ROTACONF_STRIP_WIDTH(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, __x)
+#define MCDE_ROTACONF_SINGLE_BUF_SHIFT 15
+#define MCDE_ROTACONF_SINGLE_BUF_MASK 0x00008000
+#define MCDE_ROTACONF_SINGLE_BUF(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, SINGLE_BUF, __x)
+#define MCDE_ROTACONF_WR_ROPC_SHIFT 16
+#define MCDE_ROTACONF_WR_ROPC_MASK 0x00FF0000
+#define MCDE_ROTACONF_WR_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_ROPC, __x)
+#define MCDE_ROTACONF_RD_ROPC_SHIFT 24
+#define MCDE_ROTACONF_RD_ROPC_MASK 0xFF000000
+#define MCDE_ROTACONF_RD_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_ROPC, __x)
+#define MCDE_ROTBCONF 0x00000A7C
+#define MCDE_ROTBCONF_ROTBURSTSIZE_SHIFT 0
+#define MCDE_ROTBCONF_ROTBURSTSIZE_MASK 0x00000003
+#define MCDE_ROTBCONF_ROTBURSTSIZE_1W 0
+#define MCDE_ROTBCONF_ROTBURSTSIZE_2W 1
+#define MCDE_ROTBCONF_ROTBURSTSIZE_4W 2
+#define MCDE_ROTBCONF_ROTBURSTSIZE_8W 3
+#define MCDE_ROTBCONF_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, \
+ MCDE_ROTBCONF_ROTBURSTSIZE_##__x)
+#define MCDE_ROTBCONF_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, __x)
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_SHIFT 2
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_MASK 0x00000004
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE_HW, __x)
+#define MCDE_ROTBCONF_ROTDIR_SHIFT 3
+#define MCDE_ROTBCONF_ROTDIR_MASK 0x00000008
+#define MCDE_ROTBCONF_ROTDIR_CCW 0
+#define MCDE_ROTBCONF_ROTDIR_CW 1
+#define MCDE_ROTBCONF_ROTDIR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, MCDE_ROTBCONF_ROTDIR_##__x)
+#define MCDE_ROTBCONF_ROTDIR(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, __x)
+#define MCDE_ROTBCONF_WR_MAXOUT_SHIFT 4
+#define MCDE_ROTBCONF_WR_MAXOUT_MASK 0x00000030
+#define MCDE_ROTBCONF_WR_MAXOUT_1_REQ 0
+#define MCDE_ROTBCONF_WR_MAXOUT_2_REQ 1
+#define MCDE_ROTBCONF_WR_MAXOUT_4_REQ 2
+#define MCDE_ROTBCONF_WR_MAXOUT_8_REQ 3
+#define MCDE_ROTBCONF_WR_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, MCDE_ROTBCONF_WR_MAXOUT_##__x)
+#define MCDE_ROTBCONF_WR_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, __x)
+#define MCDE_ROTBCONF_RD_MAXOUT_SHIFT 6
+#define MCDE_ROTBCONF_RD_MAXOUT_MASK 0x000000C0
+#define MCDE_ROTBCONF_RD_MAXOUT_1_REQ 0
+#define MCDE_ROTBCONF_RD_MAXOUT_2_REQ 1
+#define MCDE_ROTBCONF_RD_MAXOUT_4_REQ 2
+#define MCDE_ROTBCONF_RD_MAXOUT_8_REQ 3
+#define MCDE_ROTBCONF_RD_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, MCDE_ROTBCONF_RD_MAXOUT_##__x)
+#define MCDE_ROTBCONF_RD_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, __x)
+#define MCDE_ROTBCONF_STRIP_WIDTH_SHIFT 8
+#define MCDE_ROTBCONF_STRIP_WIDTH_MASK 0x00007F00
+#define MCDE_ROTBCONF_STRIP_WIDTH_2PIX 0
+#define MCDE_ROTBCONF_STRIP_WIDTH_4PIX 1
+#define MCDE_ROTBCONF_STRIP_WIDTH_8PIX 2
+#define MCDE_ROTBCONF_STRIP_WIDTH_16PIX 3
+#define MCDE_ROTBCONF_STRIP_WIDTH_32PIX 4
+#define MCDE_ROTBCONF_STRIP_WIDTH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, \
+ MCDE_ROTBCONF_STRIP_WIDTH_##__x)
+#define MCDE_ROTBCONF_STRIP_WIDTH(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, __x)
+#define MCDE_ROTBCONF_SINGLE_BUF_SHIFT 15
+#define MCDE_ROTBCONF_SINGLE_BUF_MASK 0x00008000
+#define MCDE_ROTBCONF_SINGLE_BUF(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, SINGLE_BUF, __x)
+#define MCDE_ROTBCONF_WR_ROPC_SHIFT 16
+#define MCDE_ROTBCONF_WR_ROPC_MASK 0x00FF0000
+#define MCDE_ROTBCONF_WR_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_ROPC, __x)
+#define MCDE_ROTBCONF_RD_ROPC_SHIFT 24
+#define MCDE_ROTBCONF_RD_ROPC_MASK 0xFF000000
+#define MCDE_ROTBCONF_RD_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_ROPC, __x)
+#define MCDE_SYNCHCONFA 0x00000880
+#define MCDE_SYNCHCONFA_GROUPOFFSET 0x200
+#define MCDE_SYNCHCONFA_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONFA_HWREQVEVENT_MASK 0x00000003
+#define MCDE_SYNCHCONFA_HWREQVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFA_HWREQVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFA_HWREQVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFA_HWREQVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, \
+ MCDE_SYNCHCONFA_HWREQVEVENT_##__x)
+#define MCDE_SYNCHCONFA_HWREQVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, __x)
+#define MCDE_SYNCHCONFA_HWREQVCNT_SHIFT 2
+#define MCDE_SYNCHCONFA_HWREQVCNT_MASK 0x0000FFFC
+#define MCDE_SYNCHCONFA_HWREQVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVCNT, __x)
+#define MCDE_SYNCHCONFA_SWINTVEVENT_SHIFT 16
+#define MCDE_SYNCHCONFA_SWINTVEVENT_MASK 0x00030000
+#define MCDE_SYNCHCONFA_SWINTVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFA_SWINTVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFA_SWINTVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFA_SWINTVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, \
+ MCDE_SYNCHCONFA_SWINTVEVENT_##__x)
+#define MCDE_SYNCHCONFA_SWINTVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, __x)
+#define MCDE_SYNCHCONFA_SWINTVCNT_SHIFT 18
+#define MCDE_SYNCHCONFA_SWINTVCNT_MASK 0xFFFC0000
+#define MCDE_SYNCHCONFA_SWINTVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVCNT, __x)
+#define MCDE_SYNCHCONFB 0x00000A80
+#define MCDE_SYNCHCONFB_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONFB_HWREQVEVENT_MASK 0x00000003
+#define MCDE_SYNCHCONFB_HWREQVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFB_HWREQVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFB_HWREQVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFB_HWREQVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, \
+ MCDE_SYNCHCONFB_HWREQVEVENT_##__x)
+#define MCDE_SYNCHCONFB_HWREQVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, __x)
+#define MCDE_SYNCHCONFB_HWREQVCNT_SHIFT 2
+#define MCDE_SYNCHCONFB_HWREQVCNT_MASK 0x0000FFFC
+#define MCDE_SYNCHCONFB_HWREQVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVCNT, __x)
+#define MCDE_SYNCHCONFB_SWINTVEVENT_SHIFT 16
+#define MCDE_SYNCHCONFB_SWINTVEVENT_MASK 0x00030000
+#define MCDE_SYNCHCONFB_SWINTVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFB_SWINTVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFB_SWINTVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFB_SWINTVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, \
+ MCDE_SYNCHCONFB_SWINTVEVENT_##__x)
+#define MCDE_SYNCHCONFB_SWINTVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, __x)
+#define MCDE_SYNCHCONFB_SWINTVCNT_SHIFT 18
+#define MCDE_SYNCHCONFB_SWINTVCNT_MASK 0xFFFC0000
+#define MCDE_SYNCHCONFB_SWINTVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVCNT, __x)
+#define MCDE_CTRLA 0x00000884
+#define MCDE_CTRLA_GROUPOFFSET 0x200
+#define MCDE_CTRLA_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLA_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLA_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOWTRMRK, __x)
+#define MCDE_CTRLA_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLA_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLA_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOEMPTY, __x)
+#define MCDE_CTRLA_FIFOFULL_SHIFT 13
+#define MCDE_CTRLA_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLA_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOFULL, __x)
+#define MCDE_CTRLA_FORMID_SHIFT 16
+#define MCDE_CTRLA_FORMID_MASK 0x00070000
+#define MCDE_CTRLA_FORMID_DSI0VID 0
+#define MCDE_CTRLA_FORMID_DSI0CMD 1
+#define MCDE_CTRLA_FORMID_DSI1VID 2
+#define MCDE_CTRLA_FORMID_DSI1CMD 3
+#define MCDE_CTRLA_FORMID_DSI2VID 4
+#define MCDE_CTRLA_FORMID_DSI2CMD 5
+#define MCDE_CTRLA_FORMID_DPIA 0
+#define MCDE_CTRLA_FORMID_DPIB 1
+#define MCDE_CTRLA_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMID, MCDE_CTRLA_FORMID_##__x)
+#define MCDE_CTRLA_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMID, __x)
+#define MCDE_CTRLA_FORMTYPE_SHIFT 20
+#define MCDE_CTRLA_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLA_FORMTYPE_DPITV 0
+#define MCDE_CTRLA_FORMTYPE_DBI 1
+#define MCDE_CTRLA_FORMTYPE_DSI 2
+#define MCDE_CTRLA_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, MCDE_CTRLA_FORMTYPE_##__x)
+#define MCDE_CTRLA_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, __x)
+#define MCDE_CTRLB 0x00000A84
+#define MCDE_CTRLB_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLB_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLB_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOWTRMRK, __x)
+#define MCDE_CTRLB_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLB_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLB_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOEMPTY, __x)
+#define MCDE_CTRLB_FIFOFULL_SHIFT 13
+#define MCDE_CTRLB_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLB_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOFULL, __x)
+#define MCDE_CTRLB_FORMID_SHIFT 16
+#define MCDE_CTRLB_FORMID_MASK 0x00070000
+#define MCDE_CTRLB_FORMID_DSI0VID 0
+#define MCDE_CTRLB_FORMID_DSI0CMD 1
+#define MCDE_CTRLB_FORMID_DSI1VID 2
+#define MCDE_CTRLB_FORMID_DSI1CMD 3
+#define MCDE_CTRLB_FORMID_DSI2VID 4
+#define MCDE_CTRLB_FORMID_DSI2CMD 5
+#define MCDE_CTRLB_FORMID_DPIA 0
+#define MCDE_CTRLB_FORMID_DPIB 1
+#define MCDE_CTRLB_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMID, MCDE_CTRLB_FORMID_##__x)
+#define MCDE_CTRLB_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMID, __x)
+#define MCDE_CTRLB_FORMTYPE_SHIFT 20
+#define MCDE_CTRLB_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLB_FORMTYPE_DPITV 0
+#define MCDE_CTRLB_FORMTYPE_DBI 1
+#define MCDE_CTRLB_FORMTYPE_DSI 2
+#define MCDE_CTRLB_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, MCDE_CTRLB_FORMTYPE_##__x)
+#define MCDE_CTRLB_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, __x)
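+/*
+ * Read-back sketch (illustrative only): the _SHIFT/_MASK pairs can also be
+ * used directly to extract a field from a value read back over MMIO, e.g.
+ * checking the channel FIFO status bits in MCDE_CTRLA:
+ *
+ *	u32 ctrl = readl(mcde_io_base + MCDE_CTRLA);
+ *	bool full = (ctrl & MCDE_CTRLA_FIFOFULL_MASK) >>
+ *			MCDE_CTRLA_FIFOFULL_SHIFT;
+ *
+ * "mcde_io_base" is again a hypothetical name for the remapped register base.
+ */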
+#define MCDE_GAM0A 0x00000888
+#define MCDE_GAM0A_GROUPOFFSET 0x200
+#define MCDE_GAM0A_BLUE_SHIFT 0
+#define MCDE_GAM0A_BLUE_MASK 0x00FFFFFF
+#define MCDE_GAM0A_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_GAM0A, BLUE, __x)
+#define MCDE_GAM0B 0x00000A88
+#define MCDE_GAM0B_BLUE_SHIFT 0
+#define MCDE_GAM0B_BLUE_MASK 0x00FFFFFF
+#define MCDE_GAM0B_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_GAM0B, BLUE, __x)
+#define MCDE_GAM1A 0x0000088C
+#define MCDE_GAM1A_GROUPOFFSET 0x200
+#define MCDE_GAM1A_GREEN_SHIFT 0
+#define MCDE_GAM1A_GREEN_MASK 0x00FFFFFF
+#define MCDE_GAM1A_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_GAM1A, GREEN, __x)
+#define MCDE_GAM1B 0x00000A8C
+#define MCDE_GAM1B_GREEN_SHIFT 0
+#define MCDE_GAM1B_GREEN_MASK 0x00FFFFFF
+#define MCDE_GAM1B_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_GAM1B, GREEN, __x)
+#define MCDE_GAM2A 0x00000890
+#define MCDE_GAM2A_GROUPOFFSET 0x200
+#define MCDE_GAM2A_RED_SHIFT 0
+#define MCDE_GAM2A_RED_MASK 0x00FFFFFF
+#define MCDE_GAM2A_RED(__x) \
+ MCDE_VAL2REG(MCDE_GAM2A, RED, __x)
+#define MCDE_GAM2B 0x00000A90
+#define MCDE_GAM2B_RED_SHIFT 0
+#define MCDE_GAM2B_RED_MASK 0x00FFFFFF
+#define MCDE_GAM2B_RED(__x) \
+ MCDE_VAL2REG(MCDE_GAM2B, RED, __x)
+#define MCDE_OLEDCONV1A 0x00000894
+#define MCDE_OLEDCONV1A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV1A_ALPHA_RED_SHIFT 0
+#define MCDE_OLEDCONV1A_ALPHA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV1A_ALPHA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_RED, __x)
+#define MCDE_OLEDCONV1A_ALPHA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV1A_ALPHA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV1A_ALPHA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_GREEN, __x)
+#define MCDE_OLEDCONV1B 0x00000A94
+#define MCDE_OLEDCONV1B_ALPHA_RED_SHIFT 0
+#define MCDE_OLEDCONV1B_ALPHA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV1B_ALPHA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_RED, __x)
+#define MCDE_OLEDCONV1B_ALPHA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV1B_ALPHA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV1B_ALPHA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_GREEN, __x)
+#define MCDE_OLEDCONV2A 0x00000898
+#define MCDE_OLEDCONV2A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV2A_ALPHA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV2A_ALPHA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV2A_ALPHA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2A, ALPHA_BLUE, __x)
+#define MCDE_OLEDCONV2A_BETA_RED_SHIFT 16
+#define MCDE_OLEDCONV2A_BETA_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV2A_BETA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2A, BETA_RED, __x)
+#define MCDE_OLEDCONV2B 0x00000A98
+#define MCDE_OLEDCONV2B_ALPHA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV2B_ALPHA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV2B_ALPHA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2B, ALPHA_BLUE, __x)
+#define MCDE_OLEDCONV2B_BETA_RED_SHIFT 16
+#define MCDE_OLEDCONV2B_BETA_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV2B_BETA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2B, BETA_RED, __x)
+#define MCDE_OLEDCONV3A 0x0000089C
+#define MCDE_OLEDCONV3A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV3A_BETA_GREEN_SHIFT 0
+#define MCDE_OLEDCONV3A_BETA_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV3A_BETA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_GREEN, __x)
+#define MCDE_OLEDCONV3A_BETA_BLUE_SHIFT 16
+#define MCDE_OLEDCONV3A_BETA_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV3A_BETA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_BLUE, __x)
+#define MCDE_OLEDCONV3B 0x00000A9C
+#define MCDE_OLEDCONV3B_BETA_GREEN_SHIFT 0
+#define MCDE_OLEDCONV3B_BETA_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV3B_BETA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_GREEN, __x)
+#define MCDE_OLEDCONV3B_BETA_BLUE_SHIFT 16
+#define MCDE_OLEDCONV3B_BETA_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV3B_BETA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_BLUE, __x)
+#define MCDE_OLEDCONV4A 0x000008A0
+#define MCDE_OLEDCONV4A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV4A_GAMMA_RED_SHIFT 0
+#define MCDE_OLEDCONV4A_GAMMA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV4A_GAMMA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_RED, __x)
+#define MCDE_OLEDCONV4A_GAMMA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV4A_GAMMA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV4A_GAMMA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_GREEN, __x)
+#define MCDE_OLEDCONV4B 0x00000AA0
+#define MCDE_OLEDCONV4B_GAMMA_RED_SHIFT 0
+#define MCDE_OLEDCONV4B_GAMMA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV4B_GAMMA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_RED, __x)
+#define MCDE_OLEDCONV4B_GAMMA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV4B_GAMMA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV4B_GAMMA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_GREEN, __x)
+#define MCDE_OLEDCONV5A 0x000008A4
+#define MCDE_OLEDCONV5A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV5A_GAMMA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV5A_GAMMA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV5A_GAMMA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5A, GAMMA_BLUE, __x)
+#define MCDE_OLEDCONV5A_OFF_RED_SHIFT 16
+#define MCDE_OLEDCONV5A_OFF_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV5A_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5A, OFF_RED, __x)
+#define MCDE_OLEDCONV5B 0x00000AA4
+#define MCDE_OLEDCONV5B_GAMMA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV5B_GAMMA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV5B_GAMMA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5B, GAMMA_BLUE, __x)
+#define MCDE_OLEDCONV5B_OFF_RED_SHIFT 16
+#define MCDE_OLEDCONV5B_OFF_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV5B_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5B, OFF_RED, __x)
+#define MCDE_OLEDCONV6A 0x000008A8
+#define MCDE_OLEDCONV6A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV6A_OFF_GREEN_SHIFT 0
+#define MCDE_OLEDCONV6A_OFF_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV6A_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_GREEN, __x)
+#define MCDE_OLEDCONV6A_OFF_BLUE_SHIFT 16
+#define MCDE_OLEDCONV6A_OFF_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV6A_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_BLUE, __x)
+#define MCDE_OLEDCONV6B 0x00000AA8
+#define MCDE_OLEDCONV6B_OFF_GREEN_SHIFT 0
+#define MCDE_OLEDCONV6B_OFF_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV6B_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_GREEN, __x)
+#define MCDE_OLEDCONV6B_OFF_BLUE_SHIFT 16
+#define MCDE_OLEDCONV6B_OFF_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV6B_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_BLUE, __x)
+#define MCDE_CRC 0x00000C00
+#define MCDE_CRC_FLOEN_SHIFT 0
+#define MCDE_CRC_FLOEN_MASK 0x00000001
+#define MCDE_CRC_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, FLOEN, __x)
+#define MCDE_CRC_POWEREN_SHIFT 1
+#define MCDE_CRC_POWEREN_MASK 0x00000002
+#define MCDE_CRC_POWEREN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, POWEREN, __x)
+#define MCDE_CRC_C1EN_SHIFT 2
+#define MCDE_CRC_C1EN_MASK 0x00000004
+#define MCDE_CRC_C1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, C1EN, __x)
+#define MCDE_CRC_C2EN_SHIFT 3
+#define MCDE_CRC_C2EN_MASK 0x00000008
+#define MCDE_CRC_C2EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, C2EN, __x)
+#define MCDE_CRC_WMLVL1_SHIFT 4
+#define MCDE_CRC_WMLVL1_MASK 0x00000010
+#define MCDE_CRC_WMLVL1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WMLVL1, __x)
+#define MCDE_CRC_WMLVL2_SHIFT 5
+#define MCDE_CRC_WMLVL2_MASK 0x00000020
+#define MCDE_CRC_WMLVL2(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WMLVL2, __x)
+#define MCDE_CRC_SYNCSEL_SHIFT 6
+#define MCDE_CRC_SYNCSEL_MASK 0x00000040
+#define MCDE_CRC_SYNCSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCSEL, __x)
+#define MCDE_CRC_SYCEN0_SHIFT 7
+#define MCDE_CRC_SYCEN0_MASK 0x00000080
+#define MCDE_CRC_SYCEN0(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYCEN0, __x)
+#define MCDE_CRC_SYCEN1_SHIFT 8
+#define MCDE_CRC_SYCEN1_MASK 0x00000100
+#define MCDE_CRC_SYCEN1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYCEN1, __x)
+#define MCDE_CRC_SIZE1_SHIFT 9
+#define MCDE_CRC_SIZE1_MASK 0x00000200
+#define MCDE_CRC_SIZE1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SIZE1, __x)
+#define MCDE_CRC_SIZE2_SHIFT 10
+#define MCDE_CRC_SIZE2_MASK 0x00000400
+#define MCDE_CRC_SIZE2(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SIZE2, __x)
+#define MCDE_CRC_INBAND1_SHIFT 11
+#define MCDE_CRC_INBAND1_MASK 0x00000800
+#define MCDE_CRC_INBAND1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, INBAND1, __x)
+#define MCDE_CRC_INBAND2_SHIFT 12
+#define MCDE_CRC_INBAND2_MASK 0x00001000
+#define MCDE_CRC_INBAND2(__x) \
+ MCDE_VAL2REG(MCDE_CRC, INBAND2, __x)
+#define MCDE_CRC_CLKSEL_SHIFT 13
+#define MCDE_CRC_CLKSEL_MASK 0x00006000
+#define MCDE_CRC_CLKSEL_166MHz 0
+#define MCDE_CRC_CLKSEL_48MHz 1
+#define MCDE_CRC_CLKSEL_LCD 2
+#define MCDE_CRC_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CLKSEL, MCDE_CRC_CLKSEL_##__x)
+#define MCDE_CRC_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CLKSEL, __x)
+#define MCDE_CRC_YUVCONVC1EN_SHIFT 15
+#define MCDE_CRC_YUVCONVC1EN_MASK 0x00008000
+#define MCDE_CRC_YUVCONVC1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, YUVCONVC1EN, __x)
+#define MCDE_CRC_CS1EN_SHIFT 16
+#define MCDE_CRC_CS1EN_MASK 0x00010000
+#define MCDE_CRC_CS1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS1EN, __x)
+#define MCDE_CRC_CS2EN_SHIFT 17
+#define MCDE_CRC_CS2EN_MASK 0x00020000
+#define MCDE_CRC_CS2EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS2EN, __x)
+#define MCDE_CRC_RESEN_SHIFT 18
+#define MCDE_CRC_RESEN_MASK 0x00040000
+#define MCDE_CRC_RESEN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RESEN, __x)
+#define MCDE_CRC_CS1POL_SHIFT 19
+#define MCDE_CRC_CS1POL_MASK 0x00080000
+#define MCDE_CRC_CS1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS1POL, __x)
+#define MCDE_CRC_CS2POL_SHIFT 20
+#define MCDE_CRC_CS2POL_MASK 0x00100000
+#define MCDE_CRC_CS2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS2POL, __x)
+#define MCDE_CRC_CD1POL_SHIFT 21
+#define MCDE_CRC_CD1POL_MASK 0x00200000
+#define MCDE_CRC_CD1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CD1POL, __x)
+#define MCDE_CRC_CD2POL_SHIFT 22
+#define MCDE_CRC_CD2POL_MASK 0x00400000
+#define MCDE_CRC_CD2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CD2POL, __x)
+#define MCDE_CRC_WR1POL_SHIFT 23
+#define MCDE_CRC_WR1POL_MASK 0x00800000
+#define MCDE_CRC_WR1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WR1POL, __x)
+#define MCDE_CRC_WR2POL_SHIFT 24
+#define MCDE_CRC_WR2POL_MASK 0x01000000
+#define MCDE_CRC_WR2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WR2POL, __x)
+#define MCDE_CRC_RD1POL_SHIFT 25
+#define MCDE_CRC_RD1POL_MASK 0x02000000
+#define MCDE_CRC_RD1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RD1POL, __x)
+#define MCDE_CRC_RD2POL_SHIFT 26
+#define MCDE_CRC_RD2POL_MASK 0x04000000
+#define MCDE_CRC_RD2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RD2POL, __x)
+#define MCDE_CRC_RES1POL_SHIFT 27
+#define MCDE_CRC_RES1POL_MASK 0x08000000
+#define MCDE_CRC_RES1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RES1POL, __x)
+#define MCDE_CRC_RES2POL_SHIFT 28
+#define MCDE_CRC_RES2POL_MASK 0x10000000
+#define MCDE_CRC_RES2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RES2POL, __x)
+#define MCDE_CRC_SYNCCTRL_SHIFT 29
+#define MCDE_CRC_SYNCCTRL_MASK 0x60000000
+#define MCDE_CRC_SYNCCTRL_OFF 0
+#define MCDE_CRC_SYNCCTRL_C0 1
+#define MCDE_CRC_SYNCCTRL_C1 2
+#define MCDE_CRC_SYNCCTRL_PING_PONG 3
+#define MCDE_CRC_SYNCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, MCDE_CRC_SYNCCTRL_##__x)
+#define MCDE_CRC_SYNCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, __x)
+#define MCDE_CRC_CLAMPC1EN_SHIFT 31
+#define MCDE_CRC_CLAMPC1EN_MASK 0x80000000
+#define MCDE_CRC_CLAMPC1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CLAMPC1EN, __x)
+#define MCDE_PBCCRC0 0x00000C04
+#define MCDE_PBCCRC0_GROUPOFFSET 0x4
+#define MCDE_PBCCRC0_BSCM_SHIFT 0
+#define MCDE_PBCCRC0_BSCM_MASK 0x00000007
+#define MCDE_PBCCRC0_BSCM_1_8BIT 0
+#define MCDE_PBCCRC0_BSCM_2_8BIT 1
+#define MCDE_PBCCRC0_BSCM_3_8BIT 2
+#define MCDE_PBCCRC0_BSCM_1_16BIT 3
+#define MCDE_PBCCRC0_BSCM_2_16BIT 4
+#define MCDE_PBCCRC0_BSCM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, MCDE_PBCCRC0_BSCM_##__x)
+#define MCDE_PBCCRC0_BSCM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, __x)
+#define MCDE_PBCCRC0_BSDM_SHIFT 3
+#define MCDE_PBCCRC0_BSDM_MASK 0x00000038
+#define MCDE_PBCCRC0_BSDM_1_8BIT 0
+#define MCDE_PBCCRC0_BSDM_2_8BIT 1
+#define MCDE_PBCCRC0_BSDM_3_8BIT 2
+#define MCDE_PBCCRC0_BSDM_1_16BIT 3
+#define MCDE_PBCCRC0_BSDM_2_16BIT 4
+#define MCDE_PBCCRC0_BSDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, MCDE_PBCCRC0_BSDM_##__x)
+#define MCDE_PBCCRC0_BSDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, __x)
+#define MCDE_PBCCRC0_PDM_SHIFT 6
+#define MCDE_PBCCRC0_PDM_MASK 0x000000C0
+#define MCDE_PBCCRC0_PDM_NORMAL 0
+#define MCDE_PBCCRC0_PDM_16_TO_32 1
+#define MCDE_PBCCRC0_PDM_24_TO_32_RIGHT 2
+#define MCDE_PBCCRC0_PDM_24_TO_32_LEFT 3
+#define MCDE_PBCCRC0_PDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDM, MCDE_PBCCRC0_PDM_##__x)
+#define MCDE_PBCCRC0_PDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDM, __x)
+#define MCDE_PBCCRC0_PDCTRL_SHIFT 12
+#define MCDE_PBCCRC0_PDCTRL_MASK 0x00001000
+#define MCDE_PBCCRC0_PDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDCTRL, __x)
+#define MCDE_PBCCRC0_BPP_SHIFT 13
+#define MCDE_PBCCRC0_BPP_MASK 0x0000E000
+#define MCDE_PBCCRC0_BPP_8BPP 0
+#define MCDE_PBCCRC0_BPP_12BPP 1
+#define MCDE_PBCCRC0_BPP_15BPP 2
+#define MCDE_PBCCRC0_BPP_16BPP 3
+#define MCDE_PBCCRC0_BPP_18BPP 4
+#define MCDE_PBCCRC0_BPP_24BPP 5
+#define MCDE_PBCCRC0_BPP(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BPP, __x)
+#define MCDE_PBCCRC1 0x00000C08
+#define MCDE_PBCCRC1_BSCM_SHIFT 0
+#define MCDE_PBCCRC1_BSCM_MASK 0x00000007
+#define MCDE_PBCCRC1_BSCM_1_8BIT 0
+#define MCDE_PBCCRC1_BSCM_2_8BIT 1
+#define MCDE_PBCCRC1_BSCM_3_8BIT 2
+#define MCDE_PBCCRC1_BSCM_1_16BIT 3
+#define MCDE_PBCCRC1_BSCM_2_16BIT 4
+#define MCDE_PBCCRC1_BSCM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, MCDE_PBCCRC1_BSCM_##__x)
+#define MCDE_PBCCRC1_BSCM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, __x)
+#define MCDE_PBCCRC1_BSDM_SHIFT 3
+#define MCDE_PBCCRC1_BSDM_MASK 0x00000038
+#define MCDE_PBCCRC1_BSDM_1_8BIT 0
+#define MCDE_PBCCRC1_BSDM_2_8BIT 1
+#define MCDE_PBCCRC1_BSDM_3_8BIT 2
+#define MCDE_PBCCRC1_BSDM_1_16BIT 3
+#define MCDE_PBCCRC1_BSDM_2_16BIT 4
+#define MCDE_PBCCRC1_BSDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, MCDE_PBCCRC1_BSDM_##__x)
+#define MCDE_PBCCRC1_BSDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, __x)
+#define MCDE_PBCCRC1_PDM_SHIFT 6
+#define MCDE_PBCCRC1_PDM_MASK 0x000000C0
+#define MCDE_PBCCRC1_PDM_NORMAL 0
+#define MCDE_PBCCRC1_PDM_16_TO_32 1
+#define MCDE_PBCCRC1_PDM_24_TO_32_RIGHT 2
+#define MCDE_PBCCRC1_PDM_24_TO_32_LEFT 3
+#define MCDE_PBCCRC1_PDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDM, MCDE_PBCCRC1_PDM_##__x)
+#define MCDE_PBCCRC1_PDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDM, __x)
+#define MCDE_PBCCRC1_PDCTRL_SHIFT 12
+#define MCDE_PBCCRC1_PDCTRL_MASK 0x00001000
+#define MCDE_PBCCRC1_PDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDCTRL, __x)
+#define MCDE_PBCCRC1_BPP_SHIFT 13
+#define MCDE_PBCCRC1_BPP_MASK 0x0000E000
+#define MCDE_PBCCRC1_BPP_8BPP 0
+#define MCDE_PBCCRC1_BPP_12BPP 1
+#define MCDE_PBCCRC1_BPP_15BPP 2
+#define MCDE_PBCCRC1_BPP_16BPP 3
+#define MCDE_PBCCRC1_BPP_18BPP 4
+#define MCDE_PBCCRC1_BPP_24BPP 5
+#define MCDE_PBCCRC1_BPP(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BPP, __x)
+#define MCDE_PBCBMRC00 0x00000C0C
+#define MCDE_PBCBMRC00_GROUPOFFSET 0x4
+#define MCDE_PBCBMRC00_MUXI_SHIFT 0
+#define MCDE_PBCBMRC00_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC00_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC00, MUXI, __x)
+#define MCDE_PBCBMRC01 0x00000C10
+#define MCDE_PBCBMRC01_MUXI_SHIFT 0
+#define MCDE_PBCBMRC01_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC01_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC01, MUXI, __x)
+#define MCDE_PBCBMRC02 0x00000C14
+#define MCDE_PBCBMRC02_MUXI_SHIFT 0
+#define MCDE_PBCBMRC02_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC02_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC02, MUXI, __x)
+#define MCDE_PBCBMRC03 0x00000C18
+#define MCDE_PBCBMRC03_MUXI_SHIFT 0
+#define MCDE_PBCBMRC03_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC03_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC03, MUXI, __x)
+#define MCDE_PBCBMRC04 0x00000C1C
+#define MCDE_PBCBMRC04_MUXI_SHIFT 0
+#define MCDE_PBCBMRC04_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC04_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC04, MUXI, __x)
+#define MCDE_PBCBMRC10 0x00000C20
+#define MCDE_PBCBMRC10_MUXI_SHIFT 0
+#define MCDE_PBCBMRC10_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC10_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC10, MUXI, __x)
+#define MCDE_PBCBMRC11 0x00000C24
+#define MCDE_PBCBMRC11_MUXI_SHIFT 0
+#define MCDE_PBCBMRC11_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC11_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC11, MUXI, __x)
+#define MCDE_PBCBMRC12 0x00000C28
+#define MCDE_PBCBMRC12_MUXI_SHIFT 0
+#define MCDE_PBCBMRC12_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC12_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC12, MUXI, __x)
+#define MCDE_PBCBMRC13 0x00000C2C
+#define MCDE_PBCBMRC13_MUXI_SHIFT 0
+#define MCDE_PBCBMRC13_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC13_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC13, MUXI, __x)
+#define MCDE_PBCBMRC14 0x00000C30
+#define MCDE_PBCBMRC14_MUXI_SHIFT 0
+#define MCDE_PBCBMRC14_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC14_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC14, MUXI, __x)
+#define MCDE_PBCBCRC00 0x00000C34
+#define MCDE_PBCBCRC00_GROUPOFFSET 0x4
+#define MCDE_PBCBCRC00_CTLI_SHIFT 0
+#define MCDE_PBCBCRC00_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC00_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC00, CTLI, __x)
+#define MCDE_PBCBCRC10 0x00000C38
+#define MCDE_PBCBCRC10_CTLI_SHIFT 0
+#define MCDE_PBCBCRC10_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC10_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC10, CTLI, __x)
+#define MCDE_PBCBCRC01 0x00000C48
+#define MCDE_PBCBCRC01_GROUPOFFSET 0x4
+#define MCDE_PBCBCRC01_CTLI_SHIFT 0
+#define MCDE_PBCBCRC01_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC01_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC01, CTLI, __x)
+#define MCDE_PBCBCRC11 0x00000C4C
+#define MCDE_PBCBCRC11_CTLI_SHIFT 0
+#define MCDE_PBCBCRC11_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC11_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC11, CTLI, __x)
+#define MCDE_VSCRC0 0x00000C5C
+#define MCDE_VSCRC0_GROUPOFFSET 0x4
+#define MCDE_VSCRC0_VSPMIN_SHIFT 0
+#define MCDE_VSCRC0_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC0_VSPMIN(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPMIN, __x)
+#define MCDE_VSCRC0_VSPMAX_SHIFT 12
+#define MCDE_VSCRC0_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC0_VSPMAX(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPMAX, __x)
+#define MCDE_VSCRC0_VSPDIV_SHIFT 24
+#define MCDE_VSCRC0_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC0_VSPDIV(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, __x)
+#define MCDE_VSCRC0_VSPOL_SHIFT 27
+#define MCDE_VSCRC0_VSPOL_MASK 0x08000000
+#define MCDE_VSCRC0_VSPOL_ACTIVE_HIGH 0
+#define MCDE_VSCRC0_VSPOL_ACTIVE_LOW 1
+#define MCDE_VSCRC0_VSPOL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, MCDE_VSCRC0_VSPOL_##__x)
+#define MCDE_VSCRC0_VSPOL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, __x)
+#define MCDE_VSCRC0_VSSEL_SHIFT 28
+#define MCDE_VSCRC0_VSSEL_MASK 0x10000000
+#define MCDE_VSCRC0_VSSEL_VSYNC 0
+#define MCDE_VSCRC0_VSSEL_HSYNC 1
+#define MCDE_VSCRC0_VSSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, MCDE_VSCRC0_VSSEL_##__x)
+#define MCDE_VSCRC0_VSSEL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, __x)
+#define MCDE_VSCRC0_VSDBL_SHIFT 29
+#define MCDE_VSCRC0_VSDBL_MASK 0xE0000000
+#define MCDE_VSCRC0_VSDBL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSDBL, __x)
+#define MCDE_VSCRC1 0x00000C60
+#define MCDE_VSCRC1_VSPMIN_SHIFT 0
+#define MCDE_VSCRC1_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC1_VSPMIN(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPMIN, __x)
+#define MCDE_VSCRC1_VSPMAX_SHIFT 12
+#define MCDE_VSCRC1_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC1_VSPMAX(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPMAX, __x)
+#define MCDE_VSCRC1_VSPDIV_SHIFT 24
+#define MCDE_VSCRC1_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC1_VSPDIV(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, __x)
+#define MCDE_VSCRC1_VSPOL_SHIFT 27
+#define MCDE_VSCRC1_VSPOL_MASK 0x08000000
+#define MCDE_VSCRC1_VSPOL_ACTIVE_HIGH 0
+#define MCDE_VSCRC1_VSPOL_ACTIVE_LOW 1
+#define MCDE_VSCRC1_VSPOL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, MCDE_VSCRC1_VSPOL_##__x)
+#define MCDE_VSCRC1_VSPOL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, __x)
+#define MCDE_VSCRC1_VSSEL_SHIFT 28
+#define MCDE_VSCRC1_VSSEL_MASK 0x10000000
+#define MCDE_VSCRC1_VSSEL_VSYNC 0
+#define MCDE_VSCRC1_VSSEL_HSYNC 1
+#define MCDE_VSCRC1_VSSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, MCDE_VSCRC1_VSSEL_##__x)
+#define MCDE_VSCRC1_VSSEL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, __x)
+#define MCDE_VSCRC1_VSDBL_SHIFT 29
+#define MCDE_VSCRC1_VSDBL_MASK 0xE0000000
+#define MCDE_VSCRC1_VSDBL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSDBL, __x)
+#define MCDE_SCTRC 0x00000C64
+#define MCDE_SCTRC_SYNCDELC0_SHIFT 0
+#define MCDE_SCTRC_SYNCDELC0_MASK 0x000000FF
+#define MCDE_SCTRC_SYNCDELC0(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC0, __x)
+#define MCDE_SCTRC_SYNCDELC1_SHIFT 8
+#define MCDE_SCTRC_SYNCDELC1_MASK 0x0000FF00
+#define MCDE_SCTRC_SYNCDELC1(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC1, __x)
+#define MCDE_SCTRC_TRDELC_SHIFT 16
+#define MCDE_SCTRC_TRDELC_MASK 0x0FFF0000
+#define MCDE_SCTRC_TRDELC(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, TRDELC, __x)
+#define MCDE_SCSRC 0x00000C68
+#define MCDE_SCSRC_VSTAC0_SHIFT 0
+#define MCDE_SCSRC_VSTAC0_MASK 0x00000001
+#define MCDE_SCSRC_VSTAC0(__x) \
+ MCDE_VAL2REG(MCDE_SCSRC, VSTAC0, __x)
+#define MCDE_SCSRC_VSTAC1_SHIFT 1
+#define MCDE_SCSRC_VSTAC1_MASK 0x00000002
+#define MCDE_SCSRC_VSTAC1(__x) \
+ MCDE_VAL2REG(MCDE_SCSRC, VSTAC1, __x)
+#define MCDE_BCNR0 0x00000C6C
+#define MCDE_BCNR0_GROUPOFFSET 0x4
+#define MCDE_BCNR0_BCN_SHIFT 0
+#define MCDE_BCNR0_BCN_MASK 0x000000FF
+#define MCDE_BCNR0_BCN(__x) \
+ MCDE_VAL2REG(MCDE_BCNR0, BCN, __x)
+#define MCDE_BCNR1 0x00000C70
+#define MCDE_BCNR1_BCN_SHIFT 0
+#define MCDE_BCNR1_BCN_MASK 0x000000FF
+#define MCDE_BCNR1_BCN(__x) \
+ MCDE_VAL2REG(MCDE_BCNR1, BCN, __x)
+#define MCDE_CSCDTR0 0x00000C74
+#define MCDE_CSCDTR0_GROUPOFFSET 0x4
+#define MCDE_CSCDTR0_CSACT_SHIFT 0
+#define MCDE_CSCDTR0_CSACT_MASK 0x000000FF
+#define MCDE_CSCDTR0_CSACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CSACT, __x)
+#define MCDE_CSCDTR0_CSDEACT_SHIFT 8
+#define MCDE_CSCDTR0_CSDEACT_MASK 0x0000FF00
+#define MCDE_CSCDTR0_CSDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CSDEACT, __x)
+#define MCDE_CSCDTR0_CDACT_SHIFT 16
+#define MCDE_CSCDTR0_CDACT_MASK 0x00FF0000
+#define MCDE_CSCDTR0_CDACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CDACT, __x)
+#define MCDE_CSCDTR0_CDDEACT_SHIFT 24
+#define MCDE_CSCDTR0_CDDEACT_MASK 0xFF000000
+#define MCDE_CSCDTR0_CDDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CDDEACT, __x)
+#define MCDE_CSCDTR1 0x00000C78
+#define MCDE_CSCDTR1_CSACT_SHIFT 0
+#define MCDE_CSCDTR1_CSACT_MASK 0x000000FF
+#define MCDE_CSCDTR1_CSACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CSACT, __x)
+#define MCDE_CSCDTR1_CSDEACT_SHIFT 8
+#define MCDE_CSCDTR1_CSDEACT_MASK 0x0000FF00
+#define MCDE_CSCDTR1_CSDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CSDEACT, __x)
+#define MCDE_CSCDTR1_CDACT_SHIFT 16
+#define MCDE_CSCDTR1_CDACT_MASK 0x00FF0000
+#define MCDE_CSCDTR1_CDACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CDACT, __x)
+#define MCDE_CSCDTR1_CDDEACT_SHIFT 24
+#define MCDE_CSCDTR1_CDDEACT_MASK 0xFF000000
+#define MCDE_CSCDTR1_CDDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CDDEACT, __x)
+#define MCDE_RDWRTR0 0x00000C7C
+#define MCDE_RDWRTR0_GROUPOFFSET 0x4
+#define MCDE_RDWRTR0_RWACT_SHIFT 0
+#define MCDE_RDWRTR0_RWACT_MASK 0x000000FF
+#define MCDE_RDWRTR0_RWACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, RWACT, __x)
+#define MCDE_RDWRTR0_RWDEACT_SHIFT 8
+#define MCDE_RDWRTR0_RWDEACT_MASK 0x0000FF00
+#define MCDE_RDWRTR0_RWDEACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, RWDEACT, __x)
+#define MCDE_RDWRTR0_MOTINT_SHIFT 16
+#define MCDE_RDWRTR0_MOTINT_MASK 0x00010000
+#define MCDE_RDWRTR0_MOTINT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, MOTINT, __x)
+#define MCDE_RDWRTR1 0x00000C80
+#define MCDE_RDWRTR1_RWACT_SHIFT 0
+#define MCDE_RDWRTR1_RWACT_MASK 0x000000FF
+#define MCDE_RDWRTR1_RWACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, RWACT, __x)
+#define MCDE_RDWRTR1_RWDEACT_SHIFT 8
+#define MCDE_RDWRTR1_RWDEACT_MASK 0x0000FF00
+#define MCDE_RDWRTR1_RWDEACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, RWDEACT, __x)
+#define MCDE_RDWRTR1_MOTINT_SHIFT 16
+#define MCDE_RDWRTR1_MOTINT_MASK 0x00010000
+#define MCDE_RDWRTR1_MOTINT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, MOTINT, __x)
+#define MCDE_DOTR0 0x00000C84
+#define MCDE_DOTR0_GROUPOFFSET 0x4
+#define MCDE_DOTR0_DOACT_SHIFT 0
+#define MCDE_DOTR0_DOACT_MASK 0x000000FF
+#define MCDE_DOTR0_DOACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR0, DOACT, __x)
+#define MCDE_DOTR0_DODEACT_SHIFT 8
+#define MCDE_DOTR0_DODEACT_MASK 0x0000FF00
+#define MCDE_DOTR0_DODEACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR0, DODEACT, __x)
+#define MCDE_DOTR1 0x00000C88
+#define MCDE_DOTR1_DOACT_SHIFT 0
+#define MCDE_DOTR1_DOACT_MASK 0x000000FF
+#define MCDE_DOTR1_DOACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR1, DOACT, __x)
+#define MCDE_DOTR1_DODEACT_SHIFT 8
+#define MCDE_DOTR1_DODEACT_MASK 0x0000FF00
+#define MCDE_DOTR1_DODEACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR1, DODEACT, __x)
+#define MCDE_WCMDC0_V1 0x00000C8C
+#define MCDE_WCMDC0_V1_GROUPOFFSET 0x4
+#define MCDE_WCMDC0_V1_COMMANDVALUE_SHIFT 0
+#define MCDE_WCMDC0_V1_COMMANDVALUE_MASK 0x00FFFFFF
+#define MCDE_WCMDC0_V1_COMMANDVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WCMDC0_V1, COMMANDVALUE, __x)
+#define MCDE_WCMDC1_V1 0x00000C90
+#define MCDE_WCMDC1_V1_COMMANDVALUE_SHIFT 0
+#define MCDE_WCMDC1_V1_COMMANDVALUE_MASK 0x00FFFFFF
+#define MCDE_WCMDC1_V1_COMMANDVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WCMDC1_V1, COMMANDVALUE, __x)
+#define MCDE_WDATADC0 0x00000C94
+#define MCDE_WDATADC0_GROUPOFFSET 0x4
+#define MCDE_WDATADC0_DATAVALUE_SHIFT 0
+#define MCDE_WDATADC0_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_WDATADC0_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC0, DATAVALUE, __x)
+#define MCDE_WDATADC1 0x00000C98
+#define MCDE_WDATADC1_DATAVALUE_SHIFT 0
+#define MCDE_WDATADC1_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_WDATADC1_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC1, DATAVALUE, __x)
+#define MCDE_RDATADC0 0x00000C9C
+#define MCDE_RDATADC0_GROUPOFFSET 0x4
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_SHIFT 0
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC0, DATAREADFROMDISPLAYMODULE, __x)
+#define MCDE_RDATADC0_STARTREAD_SHIFT 16
+#define MCDE_RDATADC0_STARTREAD_MASK 0x00010000
+#define MCDE_RDATADC0_STARTREAD(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC0, STARTREAD, __x)
+#define MCDE_RDATADC1 0x00000CA0
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_SHIFT 0
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC1, DATAREADFROMDISPLAYMODULE, __x)
+#define MCDE_RDATADC1_STARTREAD_SHIFT 16
+#define MCDE_RDATADC1_STARTREAD_MASK 0x00010000
+#define MCDE_RDATADC1_STARTREAD(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC1, STARTREAD, __x)
+#define MCDE_STATC_V1 0x00000CA4
+#define MCDE_STATC_V1_STATBUSY0_SHIFT 0
+#define MCDE_STATC_V1_STATBUSY0_MASK 0x00000001
+#define MCDE_STATC_V1_STATBUSY0(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, STATBUSY0, __x)
+#define MCDE_STATC_V1_FIFOEMPTY0_SHIFT 1
+#define MCDE_STATC_V1_FIFOEMPTY0_MASK 0x00000002
+#define MCDE_STATC_V1_FIFOEMPTY0(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOEMPTY0, __x)
+#define MCDE_STATC_V1_FIFOFULL0_SHIFT 2
+#define MCDE_STATC_V1_FIFOFULL0_MASK 0x00000004
+#define MCDE_STATC_V1_FIFOFULL0(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOFULL0, __x)
+#define MCDE_STATC_V1_FIFOCMDEMPTY0_SHIFT 3
+#define MCDE_STATC_V1_FIFOCMDEMPTY0_MASK 0x00000008
+#define MCDE_STATC_V1_FIFOCMDEMPTY0(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOCMDEMPTY0, __x)
+#define MCDE_STATC_V1_FIFOCMDFULL0_SHIFT 4
+#define MCDE_STATC_V1_FIFOCMDFULL0_MASK 0x00000010
+#define MCDE_STATC_V1_FIFOCMDFULL0(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOCMDFULL0, __x)
+#define MCDE_STATC_V1_STATBUSY1_SHIFT 5
+#define MCDE_STATC_V1_STATBUSY1_MASK 0x00000020
+#define MCDE_STATC_V1_STATBUSY1(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, STATBUSY1, __x)
+#define MCDE_STATC_V1_FIFOEMPTY1_SHIFT 6
+#define MCDE_STATC_V1_FIFOEMPTY1_MASK 0x00000040
+#define MCDE_STATC_V1_FIFOEMPTY1(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOEMPTY1, __x)
+#define MCDE_STATC_V1_FIFOFULL1_SHIFT 7
+#define MCDE_STATC_V1_FIFOFULL1_MASK 0x00000080
+#define MCDE_STATC_V1_FIFOFULL1(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOFULL1, __x)
+#define MCDE_STATC_V1_FIFOCMDEMPTY1_SHIFT 8
+#define MCDE_STATC_V1_FIFOCMDEMPTY1_MASK 0x00000100
+#define MCDE_STATC_V1_FIFOCMDEMPTY1(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOCMDEMPTY1, __x)
+#define MCDE_STATC_V1_FIFOCMDFULL1_SHIFT 9
+#define MCDE_STATC_V1_FIFOCMDFULL1_MASK 0x00000200
+#define MCDE_STATC_V1_FIFOCMDFULL1(__x) \
+ MCDE_VAL2REG(MCDE_STATC_V1, FIFOCMDFULL1, __x)
+#define MCDE_CTRLC0 0x00000CA8
+#define MCDE_CTRLC0_GROUPOFFSET 0x4
+#define MCDE_CTRLC0_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLC0_FIFOWTRMRK_MASK 0x000000FF
+#define MCDE_CTRLC0_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOWTRMRK, __x)
+#define MCDE_CTRLC0_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLC0_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLC0_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOEMPTY, __x)
+#define MCDE_CTRLC0_FIFOFULL_SHIFT 13
+#define MCDE_CTRLC0_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLC0_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOFULL, __x)
+#define MCDE_CTRLC0_FORMID_SHIFT 16
+#define MCDE_CTRLC0_FORMID_MASK 0x00070000
+#define MCDE_CTRLC0_FORMID_DSI0VID 0
+#define MCDE_CTRLC0_FORMID_DSI0CMD 1
+#define MCDE_CTRLC0_FORMID_DSI1VID 2
+#define MCDE_CTRLC0_FORMID_DSI1CMD 3
+#define MCDE_CTRLC0_FORMID_DSI2VID 4
+#define MCDE_CTRLC0_FORMID_DSI2CMD 5
+#define MCDE_CTRLC0_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMID, MCDE_CTRLC0_FORMID_##__x)
+#define MCDE_CTRLC0_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMID, __x)
+#define MCDE_CTRLC0_FORMTYPE_SHIFT 20
+#define MCDE_CTRLC0_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLC0_FORMTYPE_DPITV 0
+#define MCDE_CTRLC0_FORMTYPE_DBI 1
+#define MCDE_CTRLC0_FORMTYPE_DSI 2
+#define MCDE_CTRLC0_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, MCDE_CTRLC0_FORMTYPE_##__x)
+#define MCDE_CTRLC0_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, __x)
+#define MCDE_CTRLC1 0x00000CAC
+#define MCDE_CTRLC1_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLC1_FIFOWTRMRK_MASK 0x000000FF
+#define MCDE_CTRLC1_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOWTRMRK, __x)
+#define MCDE_CTRLC1_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLC1_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLC1_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOEMPTY, __x)
+#define MCDE_CTRLC1_FIFOFULL_SHIFT 13
+#define MCDE_CTRLC1_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLC1_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOFULL, __x)
+#define MCDE_CTRLC1_FORMID_SHIFT 16
+#define MCDE_CTRLC1_FORMID_MASK 0x00070000
+#define MCDE_CTRLC1_FORMID_DSI0VID 0
+#define MCDE_CTRLC1_FORMID_DSI0CMD 1
+#define MCDE_CTRLC1_FORMID_DSI1VID 2
+#define MCDE_CTRLC1_FORMID_DSI1CMD 3
+#define MCDE_CTRLC1_FORMID_DSI2VID 4
+#define MCDE_CTRLC1_FORMID_DSI2CMD 5
+#define MCDE_CTRLC1_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMID, MCDE_CTRLC1_FORMID_##__x)
+#define MCDE_CTRLC1_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMID, __x)
+#define MCDE_CTRLC1_FORMTYPE_SHIFT 20
+#define MCDE_CTRLC1_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLC1_FORMTYPE_DPITV 0
+#define MCDE_CTRLC1_FORMTYPE_DBI 1
+#define MCDE_CTRLC1_FORMTYPE_DSI 2
+#define MCDE_CTRLC1_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, MCDE_CTRLC1_FORMTYPE_##__x)
+#define MCDE_CTRLC1_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, __x)
+#define MCDE_DSIVID0CONF0 0x00000E00
+#define MCDE_DSIVID0CONF0_GROUPOFFSET 0x20
+#define MCDE_DSIVID0CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID0CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID0CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BLANKING, __x)
+#define MCDE_DSIVID0CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID0CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID0CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID0CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID0CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, \
+ MCDE_DSIVID0CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID0CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, __x)
+#define MCDE_DSIVID0CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID0CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID0CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, CMD8, __x)
+#define MCDE_DSIVID0CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID0CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID0CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID0CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID0CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID0CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID0CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID0CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID0CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID0CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID0CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID0CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID0CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID0CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, \
+ MCDE_DSIVID0CONF0_PACKING_##__x)
+#define MCDE_DSIVID0CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, __x)
+#define MCDE_DSICMD0CONF0 0x00000E20
+#define MCDE_DSICMD0CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD0CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD0CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BLANKING, __x)
+#define MCDE_DSICMD0CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD0CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD0CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD0CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD0CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, \
+ MCDE_DSICMD0CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD0CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, __x)
+#define MCDE_DSICMD0CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD0CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD0CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, CMD8, __x)
+#define MCDE_DSICMD0CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD0CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD0CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD0CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD0CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD0CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD0CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD0CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD0CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD0CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD0CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD0CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD0CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD0CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, \
+ MCDE_DSICMD0CONF0_PACKING_##__x)
+#define MCDE_DSICMD0CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, __x)
+#define MCDE_DSIVID1CONF0 0x00000E40
+#define MCDE_DSIVID1CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID1CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID1CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BLANKING, __x)
+#define MCDE_DSIVID1CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID1CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID1CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID1CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID1CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, \
+ MCDE_DSIVID1CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID1CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, __x)
+#define MCDE_DSIVID1CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID1CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID1CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, CMD8, __x)
+#define MCDE_DSIVID1CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID1CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID1CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID1CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID1CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID1CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID1CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID1CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID1CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID1CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID1CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID1CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID1CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID1CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, \
+ MCDE_DSIVID1CONF0_PACKING_##__x)
+#define MCDE_DSIVID1CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, __x)
+#define MCDE_DSICMD1CONF0 0x00000E60
+#define MCDE_DSICMD1CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD1CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD1CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BLANKING, __x)
+#define MCDE_DSICMD1CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD1CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD1CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD1CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD1CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, \
+ MCDE_DSICMD1CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD1CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, __x)
+#define MCDE_DSICMD1CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD1CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD1CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, CMD8, __x)
+#define MCDE_DSICMD1CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD1CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD1CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD1CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD1CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD1CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD1CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD1CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD1CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD1CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD1CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD1CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD1CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD1CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, \
+ MCDE_DSICMD1CONF0_PACKING_##__x)
+#define MCDE_DSICMD1CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, __x)
+#define MCDE_DSIVID2CONF0 0x00000E80
+#define MCDE_DSIVID2CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID2CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID2CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BLANKING, __x)
+#define MCDE_DSIVID2CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID2CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID2CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID2CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID2CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, \
+ MCDE_DSIVID2CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID2CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, __x)
+#define MCDE_DSIVID2CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID2CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID2CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, CMD8, __x)
+#define MCDE_DSIVID2CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID2CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID2CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID2CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID2CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID2CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID2CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID2CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID2CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID2CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID2CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID2CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID2CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID2CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, \
+ MCDE_DSIVID2CONF0_PACKING_##__x)
+#define MCDE_DSIVID2CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, __x)
+#define MCDE_DSICMD2CONF0 0x00000EA0
+#define MCDE_DSICMD2CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD2CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD2CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BLANKING, __x)
+#define MCDE_DSICMD2CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD2CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD2CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD2CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD2CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, \
+ MCDE_DSICMD2CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD2CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, __x)
+#define MCDE_DSICMD2CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD2CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD2CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, CMD8, __x)
+#define MCDE_DSICMD2CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD2CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD2CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD2CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD2CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD2CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD2CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD2CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD2CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD2CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD2CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD2CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD2CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD2CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, \
+ MCDE_DSICMD2CONF0_PACKING_##__x)
+#define MCDE_DSICMD2CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, __x)
+#define MCDE_DSIVID0FRAME 0x00000E04
+#define MCDE_DSIVID0FRAME_GROUPOFFSET 0x20
+#define MCDE_DSIVID0FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID0FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID0FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0FRAME, FRAME, __x)
+#define MCDE_DSICMD0FRAME 0x00000E24
+#define MCDE_DSICMD0FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD0FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD0FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0FRAME, FRAME, __x)
+#define MCDE_DSIVID1FRAME 0x00000E44
+#define MCDE_DSIVID1FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID1FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID1FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1FRAME, FRAME, __x)
+#define MCDE_DSICMD1FRAME 0x00000E64
+#define MCDE_DSICMD1FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD1FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD1FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1FRAME, FRAME, __x)
+#define MCDE_DSIVID2FRAME 0x00000E84
+#define MCDE_DSIVID2FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID2FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID2FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2FRAME, FRAME, __x)
+#define MCDE_DSICMD2FRAME 0x00000EA4
+#define MCDE_DSICMD2FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD2FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD2FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2FRAME, FRAME, __x)
+#define MCDE_DSIVID0PKT 0x00000E08
+#define MCDE_DSIVID0PKT_GROUPOFFSET 0x20
+#define MCDE_DSIVID0PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID0PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID0PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0PKT, PACKET, __x)
+#define MCDE_DSICMD0PKT 0x00000E28
+#define MCDE_DSICMD0PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD0PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD0PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0PKT, PACKET, __x)
+#define MCDE_DSIVID1PKT 0x00000E48
+#define MCDE_DSIVID1PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID1PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID1PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1PKT, PACKET, __x)
+#define MCDE_DSICMD1PKT 0x00000E68
+#define MCDE_DSICMD1PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD1PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD1PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1PKT, PACKET, __x)
+#define MCDE_DSIVID2PKT 0x00000E88
+#define MCDE_DSIVID2PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID2PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID2PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2PKT, PACKET, __x)
+#define MCDE_DSICMD2PKT 0x00000EA8
+#define MCDE_DSICMD2PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD2PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD2PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2PKT, PACKET, __x)
+#define MCDE_DSIVID0SYNC 0x00000E0C
+#define MCDE_DSIVID0SYNC_GROUPOFFSET 0x20
+#define MCDE_DSIVID0SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID0SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID0SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0SYNC, DMA, __x)
+#define MCDE_DSIVID0SYNC_SW_SHIFT 16
+#define MCDE_DSIVID0SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID0SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0SYNC, SW, __x)
+#define MCDE_DSICMD0SYNC 0x00000E2C
+#define MCDE_DSICMD0SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD0SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD0SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0SYNC, DMA, __x)
+#define MCDE_DSICMD0SYNC_SW_SHIFT 16
+#define MCDE_DSICMD0SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD0SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0SYNC, SW, __x)
+#define MCDE_DSIVID1SYNC 0x00000E4C
+#define MCDE_DSIVID1SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID1SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID1SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1SYNC, DMA, __x)
+#define MCDE_DSIVID1SYNC_SW_SHIFT 16
+#define MCDE_DSIVID1SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID1SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1SYNC, SW, __x)
+#define MCDE_DSICMD1SYNC 0x00000E6C
+#define MCDE_DSICMD1SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD1SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD1SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1SYNC, DMA, __x)
+#define MCDE_DSICMD1SYNC_SW_SHIFT 16
+#define MCDE_DSICMD1SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD1SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1SYNC, SW, __x)
+#define MCDE_DSIVID2SYNC 0x00000E8C
+#define MCDE_DSIVID2SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID2SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID2SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2SYNC, DMA, __x)
+#define MCDE_DSIVID2SYNC_SW_SHIFT 16
+#define MCDE_DSIVID2SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID2SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2SYNC, SW, __x)
+#define MCDE_DSICMD2SYNC 0x00000EAC
+#define MCDE_DSICMD2SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD2SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD2SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2SYNC, DMA, __x)
+#define MCDE_DSICMD2SYNC_SW_SHIFT 16
+#define MCDE_DSICMD2SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD2SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2SYNC, SW, __x)
+#define MCDE_DSIVID0CMDW 0x00000E10
+#define MCDE_DSIVID0CMDW_GROUPOFFSET 0x20
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID0CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID0CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID0CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_START, __x)
+#define MCDE_DSICMD0CMDW 0x00000E30
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD0CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD0CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD0CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_START, __x)
+#define MCDE_DSIVID1CMDW 0x00000E50
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID1CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID1CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID1CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_START, __x)
+#define MCDE_DSICMD1CMDW 0x00000E70
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD1CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD1CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD1CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_START, __x)
+#define MCDE_DSIVID2CMDW 0x00000E90
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID2CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID2CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID2CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_START, __x)
+#define MCDE_DSICMD2CMDW 0x00000EB0
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD2CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD2CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD2CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_START, __x)
+#define MCDE_DSIVID0DELAY0 0x00000E14
+#define MCDE_DSIVID0DELAY0_GROUPOFFSET 0x20
+#define MCDE_DSIVID0DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID0DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID0DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD0DELAY0 0x00000E34
+#define MCDE_DSICMD0DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD0DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD0DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID1DELAY0 0x00000E54
+#define MCDE_DSIVID1DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID1DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID1DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD1DELAY0 0x00000E74
+#define MCDE_DSICMD1DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD1DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD1DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID2DELAY0 0x00000E94
+#define MCDE_DSIVID2DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID2DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID2DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD2DELAY0 0x00000EB4
+#define MCDE_DSICMD2DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD2DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD2DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID0DELAY1 0x00000E18
+#define MCDE_DSIVID0DELAY1_GROUPOFFSET 0x20
+#define MCDE_DSIVID0DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID0DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID0DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD0DELAY1 0x00000E38
+#define MCDE_DSICMD0DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD0DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD0DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSIVID1DELAY1 0x00000E58
+#define MCDE_DSIVID1DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID1DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID1DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD1DELAY1 0x00000E78
+#define MCDE_DSICMD1DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD1DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD1DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSIVID2DELAY1 0x00000E98
+#define MCDE_DSIVID2DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID2DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID2DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD2DELAY1 0x00000EB8
+#define MCDE_DSICMD2DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD2DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD2DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY1, FRAMESTARTDEL, __x)
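
The SHIFT/MASK/VAL2REG triplets above all follow one generated pattern: a field value is shifted into position and masked against the field's mask. A minimal sketch of how these macros are typically combined, assuming MCDE_VAL2REG() is defined earlier in this header as the usual ((val << SHIFT) & MASK) token paste; the mcde_base pointer and the chosen fields are only illustrative:

  #include <linux/io.h>

  /* Sketch only: assumes MCDE_VAL2REG(__reg, __fld, __val) expands to
   * (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK).
   */
  static void mcde_crc_example(void __iomem *mcde_base)
  {
          u32 val;

          /* Compose a CRC register value from individual fields. */
          val = MCDE_CRC_FLOEN(1) |
                MCDE_CRC_CLKSEL_ENUM(LCD) |    /* pastes to MCDE_CRC_CLKSEL_LCD */
                MCDE_CRC_SYNCCTRL_ENUM(OFF);
          writel(val, mcde_base + MCDE_CRC);

          /* Read-modify-write a single field using its mask. */
          val = readl(mcde_base + MCDE_CRC);
          val &= ~MCDE_CRC_POWEREN_MASK;
          val |= MCDE_CRC_POWEREN(1);
          writel(val, mcde_base + MCDE_CRC);
  }
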
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 79fd606b7cd..01477f737ca 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -343,6 +343,22 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config U8500_WATCHDOG
+ bool "ST-Ericsson U8500 watchdog"
+ depends on UX500_SOC_DB8500
+ default y
+ help
+ Say Y here to include watchdog timer support for the
+ watchdog in the PRCMU of ST-Ericsson U8500 series platforms.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
+config U8500_WATCHDOG_DEBUG
+ bool "ST-Ericsson U8500 watchdog DEBUG"
+ depends on UX500_SOC_DB8500 && DEBUG_FS
+ help
+ Say Y here to add various debugfs entries under wdog/.
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index fe893e91935..f2556f31511 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_U8500_WATCHDOG) += u8500_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 82ccd36e2c9..018c1ffc7dc 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -32,11 +32,13 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/kexec.h>
#include <asm/smp_twd.h>
struct mpcore_wdt {
- unsigned long timer_alive;
+ cpumask_t timer_alive;
struct device *dev;
void __iomem *base;
int irq;
@@ -47,6 +49,8 @@ struct mpcore_wdt {
static struct platform_device *mpcore_wdt_dev;
static DEFINE_SPINLOCK(wdt_lock);
+static DEFINE_PER_CPU(unsigned long, mpcore_wdt_rate);
+
#define TIMER_MARGIN 60
static int mpcore_margin = TIMER_MARGIN;
module_param(mpcore_margin, int, 0);
@@ -67,6 +71,8 @@ MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, "
"set to 1 to ignore reboots, 0 to reboot (default="
__MODULE_STRING(ONLY_TESTING) ")");
+#define MPCORE_WDT_PERIPHCLK_PRESCALER 2
+
/*
* This is the interrupt handler. Note that we only use this
* in testing mode, so don't actually do a reboot here.
@@ -99,9 +105,8 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
spin_lock(&wdt_lock);
/* Assume prescale is set to 256 */
- count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
- count = (0xFFFFFFFFU - count) * (HZ / 5);
- count = (count / 256) * mpcore_margin;
+ count = per_cpu(mpcore_wdt_rate, smp_processor_id()) / 256;
+ count = count * mpcore_margin;
/* Reload the counter */
writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
@@ -109,6 +114,56 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
spin_unlock(&wdt_lock);
}
+static void mpcore_wdt_set_rate(unsigned long new_rate)
+{
+ unsigned long count;
+ unsigned long long rate_tmp;
+ unsigned long old_rate;
+
+ spin_lock(&wdt_lock);
+ old_rate = per_cpu(mpcore_wdt_rate, smp_processor_id());
+ per_cpu(mpcore_wdt_rate, smp_processor_id()) = new_rate;
+
+ if (mpcore_wdt_dev) {
+ struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);
+ count = readl(wdt->base + TWD_WDOG_COUNTER);
+ /* The goal: count = count * (new_rate/old_rate); */
+ rate_tmp = (unsigned long long)count * new_rate;
+ do_div(rate_tmp, old_rate);
+ count = rate_tmp;
+ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
+ wdt->perturb = wdt->perturb ? 0 : 1;
+ }
+ spin_unlock(&wdt_lock);
+}
+
+static void mpcore_wdt_update_cpu_frequency_on_cpu(void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ mpcore_wdt_set_rate((freq->new * 1000) /
+ MPCORE_WDT_PERIPHCLK_PRESCALER);
+}
+
+static int mpcore_wdt_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (event == CPUFREQ_RESUMECHANGE ||
+ (event == CPUFREQ_PRECHANGE && freq->new > freq->old) ||
+ (event == CPUFREQ_POSTCHANGE && freq->new < freq->old))
+ smp_call_function_single(freq->cpu,
+ mpcore_wdt_update_cpu_frequency_on_cpu,
+ freq, 1);
+
+ return 0;
+}
+
+static struct notifier_block mpcore_wdt_cpufreq_notifier_block = {
+ .notifier_call = mpcore_wdt_cpufreq_notifier,
+};
+
+
static void mpcore_wdt_stop(struct mpcore_wdt *wdt)
{
spin_lock(&wdt_lock);
@@ -143,6 +198,20 @@ static int mpcore_wdt_set_heartbeat(int t)
return 0;
}
+static int mpcore_wdt_stop_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);
+ printk(KERN_INFO "Stopping watchdog on non-crashing core %u\n",
+ smp_processor_id());
+ mpcore_wdt_stop(wdt);
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block mpcore_wdt_stop_block = {
+ .notifier_call = mpcore_wdt_stop_notifier,
+};
+
/*
* /dev/watchdog handling
*/
@@ -150,7 +219,7 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file)
{
struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);
- if (test_and_set_bit(0, &wdt->timer_alive))
+ if (cpumask_test_and_set_cpu(smp_processor_id(), &wdt->timer_alive))
return -EBUSY;
if (nowayout)
@@ -158,6 +227,9 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file)
file->private_data = wdt;
+ atomic_notifier_chain_register(&crash_percpu_notifier_list,
+ &mpcore_wdt_stop_block);
+
/*
* Activate timer
*/
@@ -181,7 +253,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
"unexpected close, not stopping watchdog!\n");
mpcore_wdt_keepalive(wdt);
}
- clear_bit(0, &wdt->timer_alive);
+ cpumask_clear_cpu(smp_processor_id(), &wdt->timer_alive);
wdt->expect_close = 0;
return 0;
}
@@ -447,16 +519,31 @@ static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. "
static int __init mpcore_wdt_init(void)
{
+ int i;
+
/*
* Check that the margin value is within it's range;
* if not reset to the default
*/
if (mpcore_wdt_set_heartbeat(mpcore_margin)) {
mpcore_wdt_set_heartbeat(TIMER_MARGIN);
- printk(KERN_INFO "mpcore_margin value must be 0 < mpcore_margin < 65536, using %d\n",
+ printk(KERN_INFO "mpcore_wdt: mpcore_margin value must be 0 < mpcore_margin < 65536, using %d\n",
TIMER_MARGIN);
}
+ cpufreq_register_notifier(&mpcore_wdt_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ for_each_online_cpu(i)
+ per_cpu(mpcore_wdt_rate, i) =
+ (cpufreq_get(i) * 1000) / MPCORE_WDT_PERIPHCLK_PRESCALER;
+
+ for_each_online_cpu(i)
+ printk(KERN_INFO
+ "mpcore_wdt: rate for core %d is %lu.%02luMHz.\n", i,
+ per_cpu(mpcore_wdt_rate, i) / 1000000,
+ (per_cpu(mpcore_wdt_rate, i) / 10000) % 100);
+
printk(banner, mpcore_noboot, mpcore_margin, nowayout);
return platform_driver_register(&mpcore_wdt_driver);
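
To make the reload arithmetic above concrete: with the TWD prescaler assumed to be 256, the keepalive reload is (rate / 256) * mpcore_margin, and on a cpufreq transition the remaining counter value is scaled by new_rate/old_rate, which is what the do_div() in mpcore_wdt_set_rate() computes. A small standalone sketch with purely illustrative numbers:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Illustrative only: 1 GHz CPU, PERIPHCLK = CPU clock / 2. */
          uint64_t old_rate = 1000000000ULL / 2;
          uint64_t new_rate =  500000000ULL / 2;   /* CPU scaled down to 500 MHz */
          unsigned int margin = 60;                /* mpcore_margin, in seconds */

          /* Keepalive reload: (rate / 256) ticks per second, times the margin. */
          uint64_t load = (old_rate / 256) * margin;
          printf("reload value at 1 GHz: %llu\n", (unsigned long long)load);

          /* Frequency change: scale whatever is left on the counter. */
          uint64_t count = load / 2;               /* half the period remaining */
          count = count * new_rate / old_rate;
          printf("rescaled count: %llu\n", (unsigned long long)count);
          return 0;
  }
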
diff --git a/drivers/watchdog/u8500_wdt.c b/drivers/watchdog/u8500_wdt.c
new file mode 100644
index 00000000000..3ac23ab05f3
--- /dev/null
+++ b/drivers/watchdog/u8500_wdt.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * Heavily based upon geodewdt.c
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define WATCHDOG_TIMEOUT 600 /* 10 minutes */
+
+#define WDT_FLAGS_OPEN 1
+#define WDT_FLAGS_ORPHAN 2
+
+static unsigned long wdt_flags;
+
+static int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static u8 wdog_id;
+static bool wdt_en;
+static bool wdt_auto_off = false;
+static bool safe_close;
+
+static int u8500_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags))
+ return -EBUSY;
+
+ if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags))
+ __module_get(THIS_MODULE);
+
+ prcmu_enable_a9wdog(wdog_id);
+ wdt_en = true;
+
+ return nonseekable_open(inode, file);
+}
+
+static int u8500_wdt_release(struct inode *inode, struct file *file)
+{
+ if (safe_close) {
+ prcmu_disable_a9wdog(wdog_id);
+ module_put(THIS_MODULE);
+ } else {
+ pr_crit("Unexpected close - watchdog is not stopping.\n");
+ prcmu_kick_a9wdog(wdog_id);
+
+ set_bit(WDT_FLAGS_ORPHAN, &wdt_flags);
+ }
+
+ clear_bit(WDT_FLAGS_OPEN, &wdt_flags);
+ safe_close = false;
+ return 0;
+}
+
+static ssize_t u8500_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (!len)
+ return len;
+
+ if (!nowayout) {
+ size_t i;
+ safe_close = false;
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ safe_close = true;
+ }
+ }
+
+ prcmu_kick_a9wdog(wdog_id);
+
+ return len;
+}
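
The write handler above implements the standard Linux watchdog "magic close": any write kicks the watchdog, and a write containing the character 'V' sets safe_close so that the following release actually disables it (unless nowayout is set). A minimal user-space sketch of that protocol, with error handling deliberately omitted:

  #include <fcntl.h>
  #include <unistd.h>

  int main(void)
  {
          int fd = open("/dev/watchdog", O_WRONLY);
          if (fd < 0)
                  return 1;

          /* Any write counts as a keepalive kick. */
          write(fd, "k", 1);

          /* Write 'V' so the driver treats the upcoming close as intentional
           * and stops the watchdog instead of leaving it armed. */
          write(fd, "V", 1);
          close(fd);
          return 0;
  }
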
+
+static long u8500_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int interval;
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 1,
+ .identity = "U8500 WDT",
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_SETOPTIONS:
+ {
+ int options;
+ int ret = -EINVAL;
+
+ if (get_user(options, p))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD) {
+ prcmu_disable_a9wdog(wdog_id);
+ wdt_en = false;
+ ret = 0;
+ }
+
+ if (options & WDIOS_ENABLECARD) {
+ prcmu_enable_a9wdog(wdog_id);
+ wdt_en = true;
+ ret = 0;
+ }
+
+ return ret;
+ }
+ case WDIOC_KEEPALIVE:
+ return prcmu_kick_a9wdog(wdog_id);
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(interval, p))
+ return -EFAULT;
+
+ /* 28-bit resolution in ms, i.e. a maximum of 268435.455 s */
+ if (interval > 268435 || interval < 0)
+ return -EINVAL;
+ timeout = interval;
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+
+ /* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, p);
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
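
The bound checked in WDIOC_SETTIMEOUT follows directly from the PRCMU watchdog's 28-bit millisecond load value: 2^28 - 1 = 268435455 ms, i.e. 268435.455 s, so any timeout above 268435 seconds cannot be loaded. A one-liner confirming the arithmetic:

  #include <stdio.h>

  int main(void)
  {
          unsigned long max_ms = (1UL << 28) - 1;  /* 268435455 ms */
          printf("max timeout: %lu ms = %lu.%03lu s\n",
                 max_ms, max_ms / 1000, max_ms % 1000);
          return 0;
  }
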
+
+static const struct file_operations u8500_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = u8500_wdt_write,
+ .unlocked_ioctl = u8500_wdt_ioctl,
+ .open = u8500_wdt_open,
+ .release = u8500_wdt_release,
+};
+
+static struct miscdevice u8500_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &u8500_wdt_fops,
+};
+
+#ifdef CONFIG_U8500_WATCHDOG_DEBUG
+
+enum wdog_dbg {
+ WDOG_DBG_CONFIG,
+ WDOG_DBG_LOAD,
+ WDOG_DBG_KICK,
+ WDOG_DBG_EN,
+ WDOG_DBG_DIS,
+};
+
+static ssize_t wdog_dbg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long val;
+ int err;
+ enum wdog_dbg v = (enum wdog_dbg)((struct seq_file *)
+ (file->private_data))->private;
+
+ switch (v) {
+ case WDOG_DBG_CONFIG:
+ err = kstrtoul_from_user(user_buf, count, 0, &val);
+
+ if (!err) {
+ wdt_auto_off = val != 0;
+ (void) prcmu_config_a9wdog(1,
+ wdt_auto_off);
+ }
+ else {
+ pr_err("u8500_wdt:dbg: unknown value\n");
+ }
+ break;
+ case WDOG_DBG_LOAD:
+ err = kstrtoul_from_user(user_buf, count, 0, &val);
+
+ if (!err) {
+ timeout = val;
+ /* Convert seconds to ms */
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ } else {
+ pr_err("u8500_wdt:dbg: unknown value\n");
+ }
+ break;
+ case WDOG_DBG_KICK:
+ (void) prcmu_kick_a9wdog(wdog_id);
+ break;
+ case WDOG_DBG_EN:
+ wdt_en = true;
+ (void) prcmu_enable_a9wdog(wdog_id);
+ break;
+ case WDOG_DBG_DIS:
+ wdt_en = false;
+ (void) prcmu_disable_a9wdog(wdog_id);
+ break;
+ }
+
+ return count;
+}
+
+static int wdog_dbg_read(struct seq_file *s, void *p)
+{
+ enum wdog_dbg v = (enum wdog_dbg)s->private;
+
+ switch (v) {
+ case WDOG_DBG_CONFIG:
+ seq_printf(s,"wdog is on id %d, auto off on sleep: %s\n",
+ (int)wdog_id,
+ wdt_auto_off ? "enabled": "disabled");
+ break;
+ case WDOG_DBG_LOAD:
+ /* In seconds */
+ seq_printf(s, "wdog load is: %d s\n",
+ timeout);
+ break;
+ case WDOG_DBG_KICK:
+ break;
+ case WDOG_DBG_EN:
+ case WDOG_DBG_DIS:
+ seq_printf(s, "wdog is %sabled\n",
+ wdt_en ? "en" : "dis");
+ break;
+ }
+ return 0;
+}
+
+static int wdog_dbg_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, wdog_dbg_read, inode->i_private);
+}
+
+static const struct file_operations wdog_dbg_fops = {
+ .open = wdog_dbg_open,
+ .write = wdog_dbg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init wdog_dbg_init(void)
+{
+ struct dentry *wdog_dir;
+
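+ /*
+ * Expose id, config, load, kick, enable and disable controls for the
+ * watchdog under a "wdog" debugfs directory.
+ */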
+ wdog_dir = debugfs_create_dir("wdog", NULL);
+ if (IS_ERR_OR_NULL(wdog_dir))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_u8("id",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ &wdog_id)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("config",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_CONFIG,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("load",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_LOAD,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("kick",
+ S_IWUGO, wdog_dir,
+ (void *)WDOG_DBG_KICK,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("enable",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_EN,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("disable",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_DIS,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ return 0;
+fail:
+ pr_err("u8500:wdog: Failed to initialize wdog dbg\n");
+ debugfs_remove_recursive(wdog_dir);
+
+ return -EFAULT;
+}
+
+#else
+static inline int __init wdog_dbg_init(void)
+{
+ return 0;
+}
+#endif
+
+static int __init u8500_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ /* Number of watchdogs */
+ prcmu_config_a9wdog(1, wdt_auto_off);
+ /* convert to ms */
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+
+ ret = misc_register(&u8500_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register misc\n");
+ return ret;
+ }
+
+ ret = wdog_dbg_init();
+ if (ret < 0)
+ goto fail;
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+fail:
+ misc_deregister(&u8500_wdt_miscdev);
+ return ret;
+}
+
+static int __exit u8500_wdt_remove(struct platform_device *dev)
+{
+ prcmu_disable_a9wdog(wdog_id);
+ wdt_en = false;
+ misc_deregister(&u8500_wdt_miscdev);
+ return 0;
+}
+#ifdef CONFIG_PM
+static int u8500_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
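+ /*
+ * If the watchdog runs without the auto-off-in-sleep option, switch
+ * it to auto-off for the duration of the suspend; u8500_wdt_resume()
+ * restores the original configuration.
+ */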
+ if (wdt_en && !wdt_auto_off) {
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_config_a9wdog(1, true);
+
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ return 0;
+}
+
+static int u8500_wdt_resume(struct platform_device *pdev)
+{
+ if (wdt_en && !wdt_auto_off) {
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_config_a9wdog(1, wdt_auto_off);
+
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ return 0;
+}
+
+#else
+#define u8500_wdt_suspend NULL
+#define u8500_wdt_resume NULL
+#endif
+static struct platform_driver u8500_wdt_driver = {
+ .remove = __exit_p(u8500_wdt_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "u8500_wdt",
+ },
+ .suspend = u8500_wdt_suspend,
+ .resume = u8500_wdt_resume,
+};
+
+static int __init u8500_wdt_init(void)
+{
+ return platform_driver_probe(&u8500_wdt_driver, u8500_wdt_probe);
+}
+module_init(u8500_wdt_init);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_DESCRIPTION("U8500 Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/firmware/Makefile b/firmware/Makefile
index 5f43bfba3c7..bd8cd7649c5 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -32,6 +32,7 @@ fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \
adaptec/starfire_tx.bin
fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
+fw-shipped-$(CONFIG_AV8100) += av8100.fw
fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \
bnx2x/bnx2x-e1h-6.2.9.0.fw \
bnx2x/bnx2x-e2-6.2.9.0.fw
diff --git a/firmware/av8100.fw.ihex b/firmware/av8100.fw.ihex
new file mode 100644
index 00000000000..384a432ea43
--- /dev/null
+++ b/firmware/av8100.fw.ihex
@@ -0,0 +1,1281 @@
+:1000000080FECBFEA3EE5AF05AF0F8F159EFFDF165
+:1000100045F307F51CF52DF53FF552F557F55CF561
+:1000200061F566F57BF5B5F54CF69DEE9DEE9EEE21
+:10003000000000000000000000000081EAADECAD0F
+:1000400038201B7280814D514D514D514D514D51B4
+:100050004D51B420F42A5A9082FECD8000AF7293A5
+:10006000A3205F90808097908D200FAB01A6888998
+:100070009096AD97909AAD8B939EAD9790A2AD80E0
+:10008000B7A6AD81B7AAAD82B7AEAD263120087252
+:1000900082B721A681B7C2A680B701A6B420F52AEF
+:1000A0005A908000A77293CCAD492797905D90D469
+:1000B000AD82B7D8AD81B7DCAD80B7E0ADFFAE9013
+:1000C000815929EA265AFB382007724D38201A72C6
+:1000D000490038200572BCFFCD08AEF320792449D1
+:1000E0004424492324490BAD82FECD9E82FECD9F40
+:1000F0005B82FECD80B682FECD81B682FECD82B619
+:1001000082AD9F9086AD859084428C209F9001ABFC
+:100110000FA62A31200872FB382007724D38201AAA
+:1001200072BCFFCDFB382007723020107231201EC8
+:100130007230201C729505A09E94F6CC5F905F4FA4
+:1001400007382005720C39200572814129382018A2
+:1001500072D5265AFB382007729D9D4D38201A72A1
+:100160004DAEFFCD4DAEFFCD38201B724D4D4D4DE8
+:100170004D382019720425493820187208AE3420F1
+:1001800000000000000000191817161514131211B2
+:10019000100F0E0D0C0B0A090807060504030201D7
+:1001A000000000000000000000000000000000004F
+:1001B000000000000000000000000000000000003F
+:1001C000000000000000000000000000000000002F
+:1001D000000000000000000000000000000000001F
+:1001E000000000000000000000000000000000000F
+:1001F00000000000000000000000000000000000FF
+:1002000000000000000000000000000000000000EE
+:1002100000000000000000000000000000000000DE
+:1002200000000000000000000000000000000000CE
+:1002300000000000000000000000000000000000BE
+:1002400000000000000000000000000000000000AE
+:10025000000000000000000000000000000000009E
+:10026000000000000000000000000000000000008E
+:10027000000000000000000000000000000000007E
+:10028000000000000000000000000000000000006E
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B00000000000000000008101A60227FBD69289
+:1003C000FC3C0726FBD692FC3C0E26FBD692FC3C5E
+:1003D0001726FBD692FC3F81FAB7FBD8925CFAB69F
+:1003E000F9B7FBD8925CF9B6F8B7FBD89201AEF832
+:1003F000B6F7B7FBC892F7B6FCBF81FCBE84FAB76C
+:10040000FBD6925CF9B7FBD6925CF8B7FBD69201AB
+:10041000AEF7B7FBC69288FCBF81FAB7FBDA925CF5
+:10042000FAB6F9B7FBDA925CF9B6F8B7FBDA9201E3
+:10043000AEF8B6F7B7FBCA92F7B6FCBFBAFACCC4AF
+:10044000FBCD81FCBEE9264AFB003972FB006972D4
+:100450005AFB0069725AFB00687203AEFCBF1B278F
+:100460004DBAFACC0FFBCD81F12001ABFBD6920740
+:100470002B5A0A24FBD792FBDB9203AEFCBF81010F
+:10048000A60220FFA604240827FBD192FAB6FC3C62
+:100490000926FBD192F9B6FC3C1226FBD192F8B6A4
+:1004A000FC3C2326FBD192F7B6FC3F81FAB7FBD484
+:1004B000925CFAB6F9B7FBD4925CF9B6F8B7FBD404
+:1004C0009201AEF8B6F7B7FBC492F7B6FCBF81F75E
+:1004D000B7FBC992F7B6F8B7FBD9925AF8B6F9B795
+:1004E000FBD9925AF9B6FAB7FBDB92FAB603AEFC27
+:1004F000BF81F73C0224F8B7F8B99FF9B7F9BB42BE
+:10050000930F274D8400FBCDFFB60527FEBEFAB73B
+:10051000F9BF4293FFB6F8B7F7BF42FEB689FFB7FF
+:1005200081F73FF83FF9BFFAB781FCBE84FBD79251
+:10053000FAB65CFBD792F9B65CFBD792F8B601AE7F
+:10054000FBC792F7B688FCBF81FCBEFBD792FAB618
+:100550005CFBD792F9B65CFBD792F8B601AEFBC74D
+:1005600092F7B6FCBF8101A60227036D0426026D37
+:100570000826016D0E267D81F5264AFA36F936F8F1
+:1005800036F7340B274D8184FAB703E6F9B702E654
+:10059000F8B701E6F7B7F6888101A60227FA3D040D
+:1005A00026F93D0826F83D0E26F73D81F5264AF747
+:1005B00039F839F939FA380B274D81F7F7B9F601CF
+:1005C000E7F8B901E602E7F9B902E603E7FABB0387
+:1005D000E681F73FF83F81F7B7F8B7FFA6072AF99A
+:1005E000BFFAB78141FFBB41529384FFB7FFBB52B3
+:1005F000017BFEBEFFB7529F908881FCBEFAB7FB1D
+:10060000D6925CF9B7FBD6925CF8B7FBD69201AEF6
+:10061000F7B7FBC692FCBF81F73FF9B7F8BF5C01A3
+:1006200024F9BB4293F7B6FAB7F9BF4293F7BFE09C
+:10063000208FE326017B1A15CD0326B203CAB3032C
+:10064000C6DAEDCD032420A11227016B6AF6CDBBDB
+:10065000F6CD888155FACDF73FF83FF9B781F9B665
+:10066000FA3FE7FBCDFBB700A94101AB818484844D
+:100670008484062008359A1A2010721520C7EEA42B
+:100680001520C61420C77CA41420C61320C708A4B4
+:100690001320C609C0CD84848484848484846EDB62
+:1006A000CD47033B48033B49033B4A033B014B8097
+:1006B0004B854B804B1F2729FCCD47AEFB000335F4
+:1006C000B0840035B1840035B2840035B38413356D
+:1006D000A4840035A5840035A6840035A7840035A0
+:1006E000A0840035A1840035A2840035A3840035A0
+:1006F0000C8C00350D8C00350E8C00350F8C0035C0
+:10070000188C0035198C00351A8C00351B8C03357C
+:10071000048C0035058C0035068C0035078C0A35B5
+:10072000008C0035018C0035028C0035038C0035BF
+:100730001C8C00351D8C00351E8C00351F8C04353B
+:100740001FC1CDACC3CD248300352583003526835E
+:1007500002352783003584840035858400358684FE
+:10076000003587842035B4840035B5842035B684BF
+:100770000035B784A0350C20B5840035B68400352B
+:10078000B78480350E2420A1C803C618840035190B
+:100790008400351A8400351B84003578840035794F
+:1007A0008400357A8400357B8400353F2001353D57
+:1007B00020C7FFA63C20C706A6470301354803868D
+:1007C000354903A0354A03C70F20470302354803C4
+:1007D00049354903F0354A03C71C2048039B354976
+:1007E00003FC354A03C0352A2048038C354903BA37
+:1007F000354A0380353820480324354903F8354A03
+:1008000003C7492047035F7248035F7249035F7261
+:100810004A035F724C2708A03F274A34274A2927FA
+:100820004A1F270FA40820C6BE035F72BF03C7017B
+:10083000A63E20FE35C80310350A2003A63E202020
+:1008400035C803C721A6113F042023A61100033594
+:100850000827A3F9CD04A494F9CD5B172022A61193
+:100860000001350827A3F9CD02A494F9CD5B2A2015
+:1008700011B730A60627A3F9CD08A494F9CD5B01E2
+:100880006B9484C6026B9584C6036B9684C6046B16
+:100890009784C65A2510A10320C6F22510A1056B26
+:1008A0004CA2034F7205EE72056B4FAD025F72AE44
+:1008B000025F72B8035F7297025F7299025F72986B
+:1008C000025F72B3035F72B2035F7276030A357917
+:1008D000035F727A035F7283035F727B035F727CD4
+:1008E000035F727D035F7261035F72BD035F72C25B
+:1008F000035F724C035F724D035F7286025F724F3B
+:10090000035F7262035F7250035F729B025F729CAF
+:10091000025F729D025F724B035F72113F103F4E88
+:10092000035F72A0035F72A1035F72C3035F72C4AF
+:1009300003FF35C7035F72BC035F729B06205F72C3
+:1009400088888888888184E3ADF42540A1016B4CB8
+:10095000126F01EE72016B4F888105201072152015
+:100960001172101EADF9CC8BFFA606AE8185017BFE
+:100970009AA003C70FA4A003C6A2034F72A0035CF2
+:1009800072016BA203D6A003CE1827A003C1A10356
+:10099000C69B016B4F8881A103C70FA4A103C6A208
+:1009A00003D7A1035C72A103CE808602C75120C683
+:1009B00050201172808590FD0032FC0032FB003225
+:1009C000000132FF0032FE00328484848457F6CD69
+:1009D00063A60520B3035A72B2035A720426B30306
+:1009E000C60F27B203CAB303C67D035F72B2035FAB
+:1009F00072B3031835082500A2B203C619A0B303C9
+:100A0000C618207D030135062710A5047B016B1055
+:100A100040C6026B1140C6036B1240C6046B134004
+:100A2000C609C0CD4620C74620C641205F720A4293
+:100A3000200D7288888888FE003BFF003B00013B48
+:100A4000FB003BFC003BFD003B8990808590FD0056
+:100A500032FC0032FB0032000132FF0032FE003275
+:100A600057F6CD152010720727092B10B61220015A
+:100A700035FE003BFF003B00013BFB003BFC003B25
+:100A8000FD003B8990800C4000350D4000350E4044
+:100A900000350F400435112080358011204035800D
+:100AA0001120203580112010358011200835801745
+:100AB000201472801720157205172005721120046A
+:100AC0003580BB03C74A05201A7206201872112010
+:100AD000C702A680BB03C705201A7206201972112F
+:100AE00020C701A6800488003505883035068800B7
+:100AF00035078800351020803581B8FACDBCAEFBB3
+:100B000000843581FBB700A94101AB808590FD00D1
+:100B100032FC0032FB0032000132FF0032FE0032B4
+:100B2000FA0032F90032F80032F70032848484840B
+:100B30008484B8FACDA0AEFB008435C4FBCD33ADC0
+:100B40005BF73FF83FF91FD7FACDC903CECA03C6FA
+:100B5000036B80A4037B046B4F016BA084C6026B04
+:100B6000A184C6036BA284C6046BA384C68484C913
+:100B700003CFCA03C76EC0CD9802CE9902C6CB037D
+:100B80003BCC033B0F2052C0CD9802CE9902C6CB7E
+:100B9000033BCC033B112405E272CB03C606E07293
+:100BA000CC03C61F2405E2729802C606E0729902C1
+:100BB000C61C25CB03C2057BCC03C0067B0C249846
+:100BC00002C2057B9902C0067B5C20C903CF980254
+:100BD000CECA039902550D264C03C60D20CB03CE79
+:100BE000CA03C7CC03C60B274A4B03C607200135EF
+:100BF00007200335082604A105201D7205201C725C
+:100C0000082502A19702C61B269802C1057B222651
+:100C10009902C1067B29264A4B03C6CB035F72CCDF
+:100C200003C706A6FDF4CDF9B718AAF9B6E7FBCDC0
+:100C3000F5F4CD5B12200AA6FDF4CDF918E7FBCD43
+:100C4000F5F4CD5B102400A2057B0AA0067B1A25D3
+:100C500000A2057B06A0067B036BC7A4037B016B88
+:100C6000BC84C6026BBD84C6036BBE84C6046BBF66
+:100C700084C657274C03C6066BA783C6056BA6839D
+:100C8000C6CA035F72C9035F72CC035F72CB035F96
+:100C90007210204035888888888888F7003BF800E3
+:100CA0003BF9003BFA003BFE003BFF003B00013BF1
+:100CB000FB003BFC003BFD003B89908148830035F5
+:100CC000498300354A8300354B830F35808590FD7D
+:100CD0000032FC0032FB0032000132FF0032FE0025
+:100CE0003284848484BC035F725DC7CD20AD052649
+:100CF0004ABC03C60B2603A14B03C6C4035C7204A3
+:100D0000270FA1C403C60B20BB0301351126C40362
+:100D10005A721727C403C6112603EA72047B0727F9
+:100D20004C037B05264C047B1127037B04264A04D5
+:100D30007B7DDECD03EE729F036B04EF7203E272E4
+:100D40004104E0725F01A6036B01E272037B046B56
+:100D500002E072047B0E2403E2729F04E0724654A8
+:100D600001EF72026B46542903CE2A03C6082029DC
+:100D700003CE2A03C608264A2203C6E32603E172ED
+:100D8000A683C6EB2604E172A783C6046BA783C6BD
+:100D9000036BA683C605F3CC848484848B0235352B
+:100DA0008C0286358D0237358E02BD35C4030F3572
+:100DB000C703013534F3CD8AC0CD8F023B90023B8F
+:100DC00091023B92023B3126C703C62BC7CD0327B6
+:100DD000C403C61BF3CC032602A1BA03C605F3CC99
+:100DE000032702A14B03C61020203588888888FE7F
+:100DF000003BFF003B00013BFB003BFC003BFD00D8
+:100E00003B8990801020083581E7FBCDFBB700A916
+:100E10004102AB808590FD0032FC0032FB003200C5
+:100E20000132FF0032FE0032FA0032F90032F800DF
+:100E300032F700328484848484B8FACDFBBF84AE58
+:100E4000FA1A2FAD5B092004AEFB004435F73F3C96
+:100E5000AD5B026B0884C6036B0984C6046B0A840D
+:100E6000C6056B0B84C6B8FACDFBBF84AEFA1B5C1B
+:100E7000AD5B2D274D03C632279D02C6026B8484CD
+:100E8000C6036B8584C6046B8684C6056B8784C6DF
+:100E9000848484E8C8CD4102A941AFAB5F5221AE42
+:100EA000AD02C6024B104B004BAD025C720420AD8C
+:100EB000025F720626FFB1FFBF900D26FEB3FE3F14
+:100EC000AD02CE904A5A01264D5F3A2502A1AE02EC
+:100ED000C6EBC6CDBB035F7207264ABB03C6062618
+:100EE00004A14603C607264A4B03C6CFD5CDBD0392
+:100EF000C706264ABD03C661035C7204274C610322
+:100F0000C60A2061035F720627C403C6C5035C726C
+:100F10000424FFA1C503C6BC030135B8FACDA0AEB9
+:100F2000FB008435F71FEDF1CD5B026BA084C60397
+:100F30006BA184C6046BA284C6056BA384C684849B
+:100F400084848AC0CD8F023B90023B91023B920287
+:100F50003BC7035F72412504A1C503C665274A4B01
+:100F600003C6B8FACD9CAEFB008335F7B7F8B7F9E6
+:100F7000B74FFAB7016B9F83C60520147210200685
+:100F8000358888888888F7003BF8003BF9003BFAF1
+:100F9000003BFE003BFF003B00013BFB003BFC0035
+:100FA0003BFD003B899081FB008C35F7B7F8B7F922
+:100FB000B74FFAB7808590FD0032FC0032FB00325B
+:100FC000000132FF0032FE0032FA0032F90032F83E
+:100FD0000032F7003284B8FACD0CAE27ADF0A40190
+:100FE0007B62035F72052016720620C705AA0620E1
+:100FF000C61B2704A5017BB8FACD0CAE48ADF0A402
+:101000000520167206201472112062035A7247EFEF
+:10101000CD02A605264A05204F032502A16203C67C
+:10102000B8FACD10AE71AD6303D622276203CE5954
+:10103000270FA5017BB8FACD0CAE4CF0CD0FA4055F
+:10104000201672062012724F030135182720A501C1
+:101050007B98AD4F052780A5017B5103D7178CC620
+:1010600050035C725003CE0F2780A507205003CF9A
+:1010700006201372092710A520264F03CE016B0FFF
+:101080008CC61020103588F7003BF8003BF9003B78
+:10109000FA003BFE003BFF003B00013BFB003BFC3A
+:1010A000003BFD003B8990B8FACC0CAEFB008C35C0
+:1010B000F7B7F8B7F9B74FFAB7808590FD0032FC63
+:1010C0000032FB0032000132FF0032FE0032848425
+:1010D0008484A084C7017BA184C7027BA284C70348
+:1010E0007BA384C7047BBC030135016B7FA4017B18
+:1010F0000A20016B80AA017B122702A1BA03C60F46
+:101100002603A1042702A14B03C61A274C03C605D8
+:10111000264D03C6016BA084C6026BA184C6036B77
+:10112000A284C6046BA384C6C5035F72BA03C70357
+:10113000A48384C61FC1CD03264AB903C605201265
+:10114000721020013588888888FE003BFF003B0034
+:10115000013BFB003BFC003BFD003B8990801220E3
+:101160001035808113000135042712B7C084C6A74B
+:10117000F6CD11B71EEDCDA7F6CC11B701A6072409
+:1011800020A1C803C609206CEBCDD6206CEBCD05A1
+:101190002604A112B6062702A112B6112602A11139
+:1011A000B6B32057EBCD2A209CEACD2F2017EACDED
+:1011B000C22011B717EACD072602A111B68112008D
+:1011C000184D551300194D5514001A4D5515001B97
+:1011D0004D556E2520A1C803C6A7F6CD11B7C9E7A6
+:1011E000CD65202BE7CD6A2049E4CD9CF6CC8CE37D
+:1011F000CD7520ADE3CD7A20B4E2CD84EECC5EE2B5
+:10120000CD84EECC1EE2CD84EECC71E1CD84EECC6B
+:1012100047E0CD8188BCEDD688BDEDD658979CEED1
+:10122000CC03250FA14A7BEE5FEE5AEE55EE43EE5E
+:101230001FEE1AEE15EE0FEE0AEE05EEFFEDF9EDDC
+:10124000F3EDEDED81E7FBCDFBB700A94102ABF675
+:101250002001A6818585858585062611BE04204F3F
+:1012600026C0CD0AA69A7884C7027B7984C7037BFF
+:101270007A84C7047B7B84C7057BB8FACD78AEFB44
+:10128000008435F71836AD5B026B7884C6036B7942
+:1012900084C6046B7A84C6056B7B84C656C8CD01B0
+:1012A000A656C8CD4F9B492701E4724F01204C033D
+:1012B000264A11B6016B4F012001A6042655FACD2E
+:1012C000F83FF93FFA3F77AD5B026BC084C6036B12
+:1012D000C184C6046BC284C6056BC384C688888873
+:1012E000888881B8FACDB4AEFB00843581E7FBCDA8
+:1012F000FBB700A94101AB818585858585057BB854
+:10130000030135042702A111B60A264A13B6056B5C
+:1013100001A6042012009803550920D2C8CD052744
+:1013200085854D2F1ECD13BE14B6014B004B1E20DC
+:10133000850A1FCD13BE14B6014B9803C715B621FD
+:101340002033274D721FCD39209A5EADF91857AD65
+:101350005B9B016BB484C6026BB584C6036BB68419
+:10136000C6046BB784C626C0CDFAA69A1884003589
+:10137000198403351A8483351B84003514450035E0
+:1013800015450035164500351745023514EDCDF9E4
+:1013900016FA1209EDCD5B9B016BB484C6026BB5E6
+:1013A00084C6036BB684C6046BB784C68484848405
+:1013B000848484846EDBCD47033B48033B49033B75
+:1013C0004A033B044BC44BB44B004BEEECCCD2EC89
+:1013D000CC03264AC1ECCC03264AB9ECCC03264AFE
+:1013E00015274A12B6EEECCC032702A111B6F2EC97
+:1013F000CCBC03C74C9AB8FACD18AEFB008435F7C5
+:10140000B7F8B7F9B74FFAB712B69BF2ECCC03278F
+:1014100003A14B03C6056B01A6C2035F724B035FBA
+:10142000720C274A0F274A12274A15274AC203C6B9
+:10143000C0035F72C10301352320C0035F72C10383
+:10144000C702A602200AA6262006274A05274A0C1C
+:10145000274AC203C61D2420A1C803C6C203C77FF2
+:10146000A412B64B03033579264A11B6B8035F724E
+:101470009A18840035198400351A8400351B8400BD
+:10148000359B4B035F721A2702A111B6056B4F887B
+:1014900088888888814FD2C8CC032785854D2F1E28
+:1014A000CD11BE12B6804B004B8185858585854F59
+:1014B000848484E8C8CD00AE11A688047B88047BB0
+:1014C00088017BAE025C72042403A10826AE02C12F
+:1014D000047BE22205E17213B6056B4C9F90B202C9
+:1014E000D714E69005E6729705EB725221AE047BA5
+:1014F00015204FB102D713B6B002D712B6AF02D73C
+:1015000011B6975221AE432403A1047BE82503A121
+:10151000056B4C057B046B057B042611B1AF02D62D
+:10152000975221AE056B4F046BAE02C6036B10A63B
+:10153000026B02A67120026B03EF7206A94100AB99
+:10154000594859485948594859484A5A01264D5FFF
+:10155000016B0FA411B6222581A111B6016B4F8832
+:101560008888888881858585854F790301357A0348
+:10157000C74C082017B70C267B03C616B7047B019F
+:101580006B026B036B4F046B7E03C615B7017B14B4
+:10159000B7027B13B7037B12B7047B016B7F03C6D3
+:1015A000026B8003C6036B8103C6046B8203C61102
+:1015B000B74F01204C03267A03CE502079035F7287
+:1015C0007A03C74C5A2079035F727A035F725C27F3
+:1015D0004A17274A10274A11B678030835042712FC
+:1015E0003D78035F728888888881B8FACDB4AEFBF5
+:1015F00000843581E7FBCDFBB700A94101AB811029
+:101600004D0035114D0035124D0035819ACBCD017D
+:10161000A61CCACD044800350548003506480035EB
+:101620000748003531ADFA1A81858585854F9A7C4A
+:1016300003187288031A005589031B00558A031C7E
+:1016400000558B031D005584031E005585031F00A4
+:10165000558603200055870321005590031200553D
+:101660009103130055920314005593031500558CF4
+:10167000031600558D031700558E031800558F0370
+:10168000190055542628A111B65A207C03C710A46E
+:101690007C03C664207C031072062627A111B69B2A
+:1016A000FACDFBB748A94144AB5208AE11B668FA6F
+:1016B000CD12AE9BFACDFBB748A94140AB5208AE64
+:1016C00011B668FACD16AE1448880355154889033B
+:1016D0005516488A035517488B03551048840355FF
+:1016E000114885035512488603551348870355D87A
+:1016F000E9CDF81B02EACD5B016BB484C6026BB581
+:1017000084C6036BB684C6046BB784C64526113DF8
+:1017100077E9CC032528A111B6D1E9CC0DEACDF8A3
+:101720001B02EACD5B26C0CD0AA6016BB484C602BB
+:101730006BB584C6036BB684C6046BB784C67C03E2
+:1017400010726DE9CC032791A111B6F5E9CD134DC7
+:101750000335F5E9CD134D43350C4D1E00550D4DA8
+:101760001F00550E4D2000550F4D210055084D1AF4
+:101770000055094D1B00550A4D1C00550B4D1D0011
+:1017800055044D160055054D170055064D180055CA
+:10179000074D190055004D120055014D130055021B
+:1017A0004D140055034D150055C42755FACDF73F8C
+:1017B000F8B706A4F8B6F93FFA3F02EACD5B016B31
+:1017C000144DC6026B154DC6036B164DC6046B1740
+:1017D0004DC626C0CD0FA6F5E9CD134D0335F5E96D
+:1017E000CD134DC3352520026B036B046B4FD8E935
+:1017F000CDF81A02EACD5B016BB484C6026BB584E6
+:10180000C6036BB684C6046BB784C6622680A1117A
+:10181000B6F72492A111B6E7E8CC032480A111B653
+:101820009BD3E9CC01A6052420A1C803C60C2580C2
+:10183000A111B68888888881484200354942003520
+:101840004A42003581858585854F9A9DC5CD4D03DA
+:1018500001356E14CD03279D02C616AD4B420535EA
+:1018600004204B42013506277703C60CC4CDB8FAD5
+:10187000CDB4AEFB008435F916E7FBCDFBB700A96C
+:101880004101AB5B016BB484C6026BB584C6036BCC
+:10189000B684C6046BB784C6F4DACD502059AD4B7C
+:1018A00042053504204B42013506277703C613262F
+:1018B0004A4D03C66920CCC4CDACC3CD0826113D2A
+:1018C0009B77030135042077035F72062602A1117E
+:1018D000B68888888881B8FACD78AEFB00843581D7
+:1018E00018800035198000351A8000358108800085
+:1018F00035098000350A80003581585812BE81E7CD
+:10190000FBCDFBB700A94107AB81848B4100A94106
+:1019100009AB5B0A6B4F9ACCC4CD4C035F72BC849D
+:101920000035BD840035BE841835BF84003542AD16
+:101930001B808035148000351580003516800035F9
+:10194000178000354BAD0B800035048000350580D5
+:1019500000350680003507800035008000350180A5
+:101960000035028000350380003521E7CDFAB7F855
+:10197000A4FAB66AAD5B076B7884C6086B7984C637
+:10198000096B7A84C60A6B7B84C673209DC5CD4CD7
+:101990000301359084C7037B9184C7047B9284C77D
+:1019A000057B9384C7067BBEFBCDFBB700A9410333
+:1019B000AB5BF71EF7E6CD5B036BBFA4037B056B48
+:1019C000066B4F0F20F814F7E6CD5B036BBFA40343
+:1019D0007B046BC3A4047B056B066B4F192622A105
+:1019E000C803C69FFBCD10A6FBB700A94103AB5BA4
+:1019F000036B9484C6046B9584C6056B9684C606F7
+:101A00006B9784C682FBCD01A6FBB700A94107AB4B
+:101A10005B076B086B4F076B9884C6086B9984C68D
+:101A2000096B9A84C60A6B9B84C66DE6CC0325308D
+:101A3000A1C803C6CDD9CDBC840035BD840035BE58
+:101A4000841B35BF8440350420BF844135062614ED
+:101A50003D14E7CD1B808135B8FACD14AEFB008074
+:101A6000351DFACD5280AE30E4D602E7CD07E7CD82
+:101A70000B8065353080003531800035328000358F
+:101A800033808B352C8000352D8000352E8000353D
+:101A90002F80093528800035298000352A800035BF
+:101AA0002B802A3530264A30E4D602E7CD44810027
+:101AB000354581003546810035478132350C80003F
+:101AC000350D8000350E8000350F80083510800000
+:101AD00035118000351280003513800835B8FACDF5
+:101AE0005FFB0080351DFACD06AA4101EA72410272
+:101AF000EA7228AA5240AE2DE4D602E7CD01EF7279
+:101B0000026B5210AE15B6B8FACD04AEFB008035AC
+:101B10001DFACD4101EA724102EA7280AA5220AE5A
+:101B20002FE4D602E7CD01EF72026B5204AE2EE431
+:101B3000D602E7CD21E7CDFA10FA12FA14F7E6CD76
+:101B40005B076B7884C6086B7984C6096B7A84C698
+:101B50000A6B7B84C6B8FACDB4AEFB008435F916A7
+:101B6000F7E6CD5B076BB484C6086BB584C6096B1A
+:101B7000B684C60A6BB784C676E6CC03274A13B68A
+:101B80009BEBE6CC01A6052755FACDF73FF83FF9C8
+:101B9000B740A4F9B6FA3FF7E6CD5B076B9484C66D
+:101BA000086B9584C6096B9684C60A6B9784C68BAE
+:101BB0004100A2410AA05B000200010103010200F2
+:101BC0000200020002010300020102000301000002
+:101BD00003000081854F62035A729A0C8C00350D08
+:101BE0008C00350E8C00350F8C013504200F8C03D2
+:101BF0003506264A6203C6B8FACD10AEFB008C3516
+:101C0000F7B7F8B7F9B74FFA00630355B8FACD1C28
+:101C1000AEFB008C351DFACD5F0FA44E6303C69B4F
+:101C20000620C7FAA40620C65227E6256203C6622C
+:101C300003C1016B4C9F906303D712E69001E672DB
+:101C40009701E072102001A66303C712B66203C7B2
+:101C500011B68881854F4F035F72EF255003C10194
+:101C60006B4C9F14E75103D601EE720A204F13000C
+:101C700050035588814F9AA884290055A9842A00C9
+:101C800055AA842B0055AB842C0055B8FACD48AE2C
+:101C9000FB0084351DFACD27BE28B6B8FACD44AE78
+:101CA000FB0084351DFACD25BE26B6B8FACD40AE70
+:101CB000FB0084351DFACD23BE24B6B8FACD3CAE68
+:101CC000FB0084351DFACD21BE22B6B8FACD38AE60
+:101CD000FB0084351DFACD1FBE20B6B8FACD34AE58
+:101CE000FB0084351DFACD1DBE1EB6B8FACD30AE50
+:101CF000FB0084351DFACD1BBE1CB6B8FACD2CAE48
+:101D0000FB0084351DFACD19BE1AB6B8FACD28AE3F
+:101D1000FB0084351DFACD17BE18B6B8FACD24AE37
+:101D2000FB0084351DFACD15BE16B6B8FACD20AE2F
+:101D3000FB0084351DFACD13BE14B6B8FACD1CAE27
+:101D4000FB0084351DFACD11BE12B69B814FBD0339
+:101D500001352003CF1FBE2103C720B61E03CF1DB0
+:101D6000BE1F03C71EB61C03CF1BBE1D03C71CB678
+:101D70001A03CF19BE1B03C71AB61803CF17BE1913
+:101D800003C718B61603CF15BE1703C716B614033C
+:101D9000CF13BE1503C714B61203CF11BE1303C76A
+:101DA00012B6814F9A30420035314200353242003E
+:101DB0003533420035CFD5CD2BD7CD848484848470
+:101DC0008484846EDBCD47033B48033B49033B4A95
+:101DD000033B39033B3A033B3B033B3C033BFED213
+:101DE000CD9B8185858585854F9D0201359A8484AB
+:101DF000C7027B8584C7037B8684C7047B8784C72F
+:101E00009B056BFBA4057B042004AA057B06270128
+:101E10007B026B8484C6036B8584C6046B8684C690
+:101E2000056B8784C69A6E14CD9B05274D03C600AB
+:101E300010CD5A14CD9B02C712B6022004A6042668
+:101E400002A111B69AB8FACDFBBF84AEFA1BE7FB2C
+:101E5000CDFBB700A94102AB5B9B056B04AA057BD8
+:101E6000026B8484C6036B8584C6046B8684C605B6
+:101E70006B8784C616000135016B16B69C025F7233
+:101E800004209C0201350627173D888888888881B0
+:101E90008585858585017BBC0301359AB8FACDB46B
+:101EA000AEFB008435FA1FE7FBCDFBB700A941026A
+:101EB000AB5BB484C7027BB584C7037BB684C7041D
+:101EC0007BB784C7057BBEFBCDFBB700A94102AB46
+:101ED0005BFA1EF73FF83FF9B703A4F9B6FA3FE003
+:101EE000FACD5F90FE0001355F12B6046BFCA404CE
+:101EF0007B026BB484C6036BB584C6046BB684C620
+:101F0000056BB784C6CC840035CD840035CE844AB9
+:101F100035CF840035102520A1C803C6172603A19C
+:101F20004603C665C1CD1CBE20A69B4703220055B3
+:101F3000480323005549032400554A03250055143E
+:101F40002789FACD22AE4603C712B6C603C726B606
+:101F500026B69702C721B69802CF1FBE9902C720A6
+:101F6000B64003CF19BE4103C71AB64403CF17BE0C
+:101F70004503C718B63E03CF15BE3F03C716B6428A
+:101F800003CF13BE4303C714B669E1CCB903C74CF2
+:101F900069E1CC016B01A647274A11274A0D274D5D
+:101FA00097025F7299025F7298025F724B03C711CA
+:101FB000B6016B4F88888888880000BA420000B854
+:101FC000410AD7233C0000803F818B4100A9411486
+:101FD000AB5B84848484AFC0CD88127B88127B88FD
+:101FE000127B88127B0F208AC0CD88127B88127BDF
+:101FF00088127B88127B1127C403C6C7035F720453
+:102000002099FBCDFBB700A9410FAB5BE7FBCD8F60
+:10201000AEFB000235162BFFA20D7BFDA00E7B0A46
+:102020002B00A20D7B04A00E6B107B0D6B0F7B0FA2
+:102030006B6020C6106B6120C6116B6220C6126BEC
+:102040006320C66820263568202435642087025521
+:102050006520880255662089025567208A025568E6
+:102060002021356420C7057B6520C7067B6620C715
+:10207000077B6720C7087B60208B025561208C029C
+:102080005562208D025563208E02558702602055CF
+:10209000880261205589026220558A0263205568B2
+:1020A0002021356420C7017B6520C7027B6620C7DD
+:1020B000037B6720C7047B8B026420558C0265205C
+:1020C000558D026620558E02672055682064356460
+:1020D000208B025565208C025566208D02556720A5
+:1020E0008E0255682021356420C7097B6520C70A08
+:1020F0007B6620C70B7B6720C70C7B68202235B826
+:10210000FACD60AEFB0020351DFACD13EE72147BC4
+:1021100005E0CC032613EA72147BB8FACDFBB700B6
+:10212000A9410FAB5BF73FE7FBCD5FFB008435B800
+:10213000FACDFBB700A94109AB5BE7FBCD37AEFB9E
+:1021400000E03506203BAEFB00E03508264C61037D
+:10215000C6056B3FE0C6066B40E0C6076B41E0C6B4
+:10216000086B42E0C6016B43E0C6026B44E0C60365
+:102170006B45E0C6046B46E0C68B4100A24112A04D
+:102180005B898881F7B7F8B7F9B74FFAB781FBB722
+:1021900000A94101ABE2DDCC03A6E2DDCC02A681C1
+:1021A000204C8184848484B8FACD10AEFB00843541
+:1021B000F81EE7FBCD20AD5B1084C7017B1184C7FF
+:1021C000027B1284C7037B1384C7047B026B7FA44A
+:1021D000027BB8FACD40AD5BC4FBCDFBB700A94193
+:1021E00007AB5BF7B703A4F7B6F83FF93FFA3FE751
+:1021F000FBCD5DAD5B016B1084C6026B1184C60321
+:102200006B1284C6046B1384C6BEFBCDFBB700A95A
+:102210004107AB5B46FACD14A673DECD0B6B04A66B
+:1022200066DECC032608A061DECC032604A05EDEB9
+:10223000CC032602A0172702A01B204C1E2006A6B6
+:10224000222002A6262007A60C2710A00C2708A0F3
+:102250000C2704A0342702A03A2702A023240B7BDA
+:1022600020A1C803C6076B6020C6086B6120C609A1
+:102270006B6220C60A6B6320C66820263568202161
+:102280003568205235642000356520003566208091
+:1022900035672000359D9D682027356020930255C5
+:1022A000612094025562209502556320960255687C
+:1022B00020613568205235B8FACD64AEFB00203578
+:1022C00073DECD0B7B682022356020C7077B612041
+:1022D000C7087B6220C7097B6320C70A7B88888880
+:1022E00088816420003565200035662000358184B2
+:1022F000B8FACD10AEFB008435F9B755AAF9B6FA95
+:10230000B755AAFAB6F8B755AAF8B646FACD18A640
+:10231000F7B7F8B7F9B74FFAB7017BB8FACD14AEF3
+:10232000FB008435F7B73FA4F7B6F83FF93FFA3F13
+:1023300046FACD18A6E7FBCDFBB700A94104AB5B7D
+:1023400025240FA1077B26C0CD64A6848484848ABB
+:10235000C0CD8F023B90023B91023B92023B0F208B
+:10236000AFC0CD8F023B90023B91023B92023B11EA
+:10237000264A4B03C614840F351584003516840095
+:102380003517840035046B6020C6056B6120C606D6
+:102390006B6220C6076B6320C66820C74A9D9D6894
+:1023A00020C727A668205235642005356520F535FD
+:1023B0006620E13567200035602093025561209446
+:1023C0000255622095025563209602558F026020C7
+:1023D000559002612055910262205592026320556A
+:1023E0006820263568202135682052356420003564
+:1023F0006520003566208035672000356820273548
+:1024000093026020559402612055950262205596F2
+:1024100002632055682052356420C7047B6520C7BD
+:10242000057B6620C7067B6720C7077B6820C74AF5
+:102430006820C722A66020C7087B6120C7097B628D
+:1024400020C70A7B6320C70B7B68205235108402AB
+:1024500035118455351284553513845535016B0279
+:10246000A612DDCD67202835092001A612DDCD6733
+:102470002014350B2502A2087BDDA0097B1E204F0E
+:1024800012DDCD67200A350A2504A2087BD9A009F0
+:102490007B88812D03CE2E03C6B8FACD818484B803
+:1024A000FACD0CAEFB004135D7FACD412503C94129
+:1024B0002603CB17AD08AEFB004135D7FACD5C0142
+:1024C000264C26AD04AEFB004135D7FACD4123039F
+:1024D000C9412403CB4101E2724102E0722B03CED9
+:1024E0002C03C6B8FACD5FFB004135D7FACD5C01AD
+:1024F000264C4101E2724102E0722B03CE2C03C64E
+:10250000026B3003C6016B2F03C68888813303CE6C
+:102510003403C6B8FACD81412F03C9413003CB46FD
+:10252000542703CE2803C6812F03CE3003C6B8FA42
+:10253000CD8146FACD10A6D7FACD81FBB700A941CF
+:1025400001AB818484848484847C8300357D830012
+:10255000357E830F357F83FF35B8FACD6CAEFB0037
+:102560008335C4FBCD23AD5B2EAD43ADB8FACD2D85
+:10257000AD5BD7FACD4EAD0D2048AD39AD5BD7FA86
+:10258000CD2F03CE3003C610264A2203C6B8FACD9B
+:1025900068AEFB008335C4FBCD57AD5B62AD6DAD5E
+:1025A0005EAD5BD7FACD75AD64AEFB008335C4FB81
+:1025B000CD6FAD5B7AAD413503C9413603CB05EE36
+:1025C00072067BB8FACDBEDACD5BD7FACD05EE72D6
+:1025D000067BB8FACD60AEFB008335C4FBCDBEDA16
+:1025E000CD5BC6DACD413503C9413603CBEADACD3E
+:1025F000BEDACD5BD7FACDEADACD5CAEFB0083352F
+:10260000D7FACD46542703CE2803C605EF72066BD2
+:102610004A5A01264D3303CE3403C61020066B34CC
+:1026200003C6056B3303C60C264A2203C688888876
+:10263000888888812B03CE2C03C6B8FACD8133035A
+:10264000CE3403C6B8FACD814D412503C9412603D6
+:10265000CB81412F03C9413003CB46542703CE28F9
+:1026600003C681D7FACD41BE03C241BF03C031AD1D
+:10267000814D41BE03C241BF03C0412303C9412470
+:1026800003CB45AD812F03CE3003C6B8FACD8146CA
+:10269000FACD10A6D7FACD81FBB700A94101AB81D5
+:1026A0008B4100A9410EAB5B9883003599830035BF
+:1026B0009A8300359B831835B8FACD3CAEFB008376
+:1026C00035C4FBCD25AD5B30AD67ADB8FACD2FADD0
+:1026D0005BD7FACD72AD0D204AAD3BAD5BD7FACDDD
+:1026E0002F03CE3003C610264A2203C6B8FACD38CF
+:1026F000AEFB008335C4FBCD59AD5B64AD6FAD60FF
+:10270000AD5BD7FACD77AD34AEFB008335C4FBCDDE
+:1027100071AD5B7CAD413503C9413603CB07EE7229
+:10272000087BB8FACD61D9CD5BD7FACD07EE720838
+:102730007BB8FACD30AEFB008335C4FBCD61D9CD7B
+:102740005B69D9CD413503C9413603CBB9D9CD61D8
+:10275000D9CD5BD7FACDB9D9CD2CAEFB008335C42A
+:10276000FBCD61D9CD5B69D9CD413103C94132037C
+:10277000CB72D9CD61D9CD5BD7FACD72D9CD10AEA0
+:10278000FB008335C4FBCD61D9CD5B69D9CD4A5AF5
+:102790000126AFD9CD0DEE720E7BB8FACD61D9CD41
+:1027A0005BD7FACD4A5A01267CD9CD0CAEFB00830B
+:1027B00035C4FBCD61D9CD5B69D9CD0DEE720E7BF1
+:1027C000B8FACD61D9CD5B90D9CD08AEFB00833589
+:1027D000C4FBCD61D9CD5B69D9CD4A5A0126AFD9A9
+:1027E000CD0DEE720E7BB8FACD61D9CD5BD7FACDA7
+:1027F0004A5A01267CD9CD04AEFB008335C4FBCDFB
+:1028000061D9CD5B69D9CD0DEE720E7BB8FACD6181
+:10281000D9CD5B90D9CD40AEFB008335F73FF83F73
+:10282000F9B70FA4F9B6D7FACD05EE72067BB8FA60
+:10283000CD28AEFB008335F73FF83FF9B71FA4F969
+:10284000B6D7FACD2703CE2803C67884C7097B798B
+:1028500084C70A7B7A84C70B7B7B84C70C7B07EF1A
+:1028600072086B4A5A01264D3303CE3403C6066BF9
+:102870002A03C6056B2903C60A6B05AA0A7B20201A
+:10288000086B3403C6076B3303C605EF72066B593A
+:10289000482903CE2A03C60A6B01AA0A7B1F264ACF
+:1028A0002203C6096B80AA097B062604A14603C63B
+:1028B000096B20A60A6B0B6B0C6B4F0D6B0EEF7246
+:1028C0003303C9413403CB2D03CE2E03C68B410005
+:1028D000A2410EA05B8184848484BB030135B8FAD5
+:1028E000CDB4AEFB008435F716E7FBCDFBB700A9EE
+:1028F0004101AB5B016BE3A4017B016BB484C602B5
+:102900006BB584C6036BB684C6046BB784C630252A
+:1029100020A1C803C6B8FACD78AEFB008435C4FB4D
+:10292000CDFBB700A94101AB5BF7B703A4F7B6F83D
+:10293000B7C0A4F8B6F93FFA3F46FACD16A6F7B7E6
+:10294000F8B7F9B74FFA003D0355016BFCA4017BC2
+:10295000026B3FA4027B016B7884C6026B7984C64C
+:10296000036B7A84C6046B7B84C6B8FACD74AEFB65
+:10297000008435D7FACD2003CE2103C6B8FACD7036
+:10298000AEFB008435D7FACD1E03CE1F03C6B8FABE
+:10299000CD6CAEFB008435D7FACD1C03CE1D03C62B
+:1029A000B8FACD68AEFB008435D7FACD1A03CE1B3A
+:1029B00003C6B8FACD64AEFB008435D7FACD180350
+:1029C000CE1903C612206484003565840035668400
+:1029D00002356784CB35122502A21803C6CBA01995
+:1029E00003C61E2602A1C203C6252603A14B03C6A9
+:1029F0002C2420A1C803C6B8FACD60AEFB008435F4
+:102A0000D7FACD1603CE1703C6B8FACD5CAEFB00DD
+:102A10008435D7FACD1403CE1503C6B8FACD58AE17
+:102A2000FB008435D7FACD1203CE1303C6888888FD
+:102A30008881FBC692FCBFFBB781FBD692FC3FFBB3
+:102A4000B781842003CF1803CE2103C71903C6180A
+:102A500003CF1903C74A5A01264D59484003CE41B6
+:102A600003C60B200E264D4003CE4103C60B264A5B
+:102A70002203C61E03CF1603CE1F03C71703C617B4
+:102A8000035F7216035F721C03CF1403CE1D03C7CE
+:102A90001503C61403CF1503C74A5A01264DFA265B
+:102AA0005A90465406273D03CE903E03CE3F03C6C0
+:102AB0001A03CF1203CE1B03C71303C613035F729F
+:102AC00012035F723D035F72042604A1C203C60BAA
+:102AD0002603A10F202D035F722E03233519260331
+:102AE000A1C203C6202603A115244B03C620A1C8FA
+:102AF00003C6400325035541032603554403290318
+:102B00005545032A03553E032303553F032403552C
+:102B10004203270355430328035597D3CCC7D5CD8C
+:102B2000D0A94109AB521CAE017BB8FACD39AEFB3E
+:102B3000000335E9F9CDFBB7D0A94105AB521CAE76
+:102B4000017B3803C7C7D5CDD0A94104AB521CAE19
+:102B5000017B3703C7C7D5CDD0A94103AB521CAE0B
+:102B6000017B3603C7FBD692FC3C3503C7BFD5CDEE
+:102B7000D0A94101AB521CAE017B33035F72340319
+:102B800001353203C7FBD692FC3C3103C7BFD5CD1C
+:102B9000CFA941FDAB521CAE017B3003C7FBD692DF
+:102BA000FC3C2F03C7BFD5CDCFA941FBAB521CAE18
+:102BB000017B2E03C7FBD692FC3C2D03C7BFD5CDAE
+:102BC000CFA941F9AB521CAE017B2C03C7FBD692B7
+:102BD000FC3C2B03C7BFD5CDCFA941F7AB521CAEF0
+:102BE000017B2A03C7FBD692FC3C2903C7BFD5CD86
+:102BF000CFA941F5AB521CAE017B2803C7FBD6928F
+:102C0000FC3C2703C7BFD5CDCFA941F3AB521CAEC7
+:102C1000017B2603C7FBD692FC3C2503C7BFD5CD5D
+:102C2000CFA941F1AB521CAE017B2403C7FBD69266
+:102C3000FC3C2303C7BFD5CDCFA941EFAB521CAE9F
+:102C4000017B2203C7C7D5CDCFA941EEAB521CAE45
+:102C5000017BE6D4CC0ED5CC032702A1E6D4CC036D
+:102C60002603A14B03C63D03C729B639032A0055E5
+:102C70003A032B00553B032C00553C032D005537E0
+:102C800003C713B613B63503CF22BE3603C723B628
+:102C90003303CF20BE3403C721B63103CF1EBE326B
+:102CA00003C71FB62F03CF1CBE3003C71DB62D03AD
+:102CB000CF26BE2E03C727B62B03CF24BE2C03C7B7
+:102CC00025B62903CF18BE2A03C719B62703CF1488
+:102CD000BE2803C715B62503CF1ABE2603C71BB6E9
+:102CE0002303CF16BE2403C717B62203C728B6AEE8
+:102CF000D3CC03241DA111B60927113D016B4A1144
+:102D0000B68800F0701C050101030003008F004627
+:102D1000001B00AA011E030007000350050100600C
+:102D2000A01805010106000300700040001800B063
+:102D3000011B030007000350050100E01BFA04011A
+:102D40000006000300800048001C0090013F039033
+:102D500006200300050100C05F3B040001060003DC
+:102D600000200030001400A0003703A0052003005D
+:102D7000050100E012BD04010007000300800040CF
+:102D8000001B0080011E0380060003000501009067
+:102D9000691104000107000300200030001300A0A7
+:102DA000001603A00500030005010040D2DF030068
+:102DB000000600030088001800230040012603409D
+:102DC00005000300040100F0FB02020101080006F7
+:102DD00000700010001F00F00005024004E00150E8
+:102DE000030100005A620201010400010080002872
+:102DF000001B00000174022004580220030100108F
+:102E0000F76C040101050001002800E0061900E448
+:102E100007EE02E40CD0020005010010F76C04017B
+:102E20000105000100280074091900780AEE0278F3
+:102E30000FD00200050100405F8A03010105000177
+:102E4000002800E0061900E407EE02E40CD00200BE
+:102E500005010010F76C040101050001002C005869
+:102E60000029001801650498083804800701001043
+:102E7000F76C040101050001002C0010022900D0AC
+:102E8000026504500A38048007010010F76C040141
+:102E900001050001002C007E0229003E036504BEEE
+:102EA0000A38048007010020EED90801010500015D
+:102EB000002C0010022900D0026504500A3804805A
+:102EC00007010080F93703000005000100800018A9
+:102ED000002C0020017102C0064002A0050101C0C3
+:102EE000FC9B010000030001007E0018001600207A
+:102EF000017102C0062001A005000010F76C04015A
+:102F000001050001002C0010021400D002650450DD
+:102F10000A1C028007000010F76C04010105000183
+:102F2000002800B8011900BC02EE02BC07D0020064
+:102F3000050100C0FC9B0100000500010040000CE1
+:102F4000002C009000710260034002D002010020BA
+:102F5000EED9080101050001002C005800290018D5
+:102F6000016504980838048007010080F9370300E0
+:102F700000060007007C002000240014010D02B4AC
+:102F800006E001A0050101C0FC9B01000003000454
+:102F9000007C002600120014010D02B406F000A00F
+:102FA00005000010F76C040101050001002C005819
+:102FB0000014001801650498081C02800700001026
+:102FC000F76C0401010500010028006E0019007271
+:102FD00001EE027206D00200050100C0FC9B010058
+:102FE00000060007003E00100024008A000D025A6F
+:102FF00003E001D002010080858001000002000191
+:1030000000600010002300A0000D022003E00180FA
+:10301000020181848B4100A94108AB5B096B016B04
+:103020004F012001A604269803C1057B0B2604E16D
+:10303000729903C608279803C6B72655FBCDFBB780
+:1030400000A94102AB5BE7FBCDFBB700A94106AB92
+:103050005B066B3048C6076B3148C6086B3248C602
+:10306000096B3348C684842F1ECD74AE08A6024B6C
+:10307000004B026B067B036B077B046B087B056BC5
+:10308000097B066B3048C6076B3148C6086B32486F
+:10309000C6096B3348C6016B4F8B4100A24109A0A2
+:1030A0005B8118E7FEC692FFBF9019CECC17B6170A
+:1030B000B780AA027BC62505A10D6B4C0D7BD225DE
+:1030C00004A10C6B4C0C7B18CECC0327A0E118E6B6
+:1030D0009097900CEB720BEB7248480D7B9701EBCD
+:1030E0007248480D7B016B0CE07203A60C6B4F0D10
+:1030F0006B4F84842F1ECD74AE20A6144B8807AB73
+:103100000B7B761DCD84B417CD00AE18A6880AAB14
+:103110000B7BFD16CDAE2504A10D6B4C0D7B74AD64
+:10312000FE3C02249790FFBB0DE07203A693FFBF05
+:10313000FEB700A94107AB5B93909706AB0DEB720E
+:103140000B7B56CFCDFE3C02249790FFBB0DE07267
+:1031500003A693FFBFFEB700A94103AB5B93905C4E
+:103160005C970DEB720B7B0D6B4F076B2C48C60801
+:103170006B2D48C6096B2E48C60A6B2F48C6036BD9
+:103180002848C6046B2948C6056B2A48C6066B2B1F
+:1031900048C6F02505A10D6B4C9F18E79803D60D86
+:1031A000EE720D6B12264A027B84842F1ECD74AE04
+:1031B00043A6880C7B074B19E79903C618E79803C9
+:1031C000C60BEE724DCFCC0326027B0B6B5205AEC5
+:1031D000E82414A1026B7FA49803C681848B41006C
+:1031E000A9410CAB5B0D6B4F0D99030772052002D3
+:1031F00098030F7284842F1ECD74AE41A6024B003B
+:103200004B192660A160A49803C684842F1ECD7438
+:10321000AE40A6014B004B8B4100A2410DA05B814B
+:103220007B03C67B0301350498030D7284842F1E33
+:10323000CD74AE40A6014B004B7B035F7281859A33
+:10324000B8FACD14AEFB004835F7B7F8B7F9B74F69
+:10325000FA007E035510487F03551148800355122C
+:103260004881035513488203559BCD2505A1016B69
+:103270004C017BBEFBCD7FAEFB00033546FACD484B
+:1032800048489FF7B7F8B7F9B74FFAB79803D60190
+:10329000EE721F207E039C0355072604A1016B4F8D
+:1032A00084847F035F7280035F7281035F72820395
+:1032B0005F722F1ECD74AE4F054B004B8881848BFF
+:1032C0004100A94108AB5B096B016B850A1FCD74F6
+:1032D000AE10A6054B9C03C7057B026B9003C6038B
+:1032E0006B9103C6046B9203C6056B9303C69B03E5
+:1032F000C7027B9A03C7037B9903C7047B9803C764
+:10330000057B026B8C03C6036B8D03C6046B8E03B7
+:10331000C6056B8F03C6016B850A1FCD74AE18A658
+:10332000084B9F03C7067B9E03C7077B9D03C70807
+:103330007B9C03C7097B066B1C48C6076B1D48C6F0
+:10334000086B1E48C6096B1F48C69B03C7067B9ABD
+:1033500003C7077B9903C7087B9803C7097B066BE4
+:103360001848C6076B1948C6086B1A48C6096B1B74
+:1033700048C6016B4F8B4100A24109A05B81858546
+:10338000858585017B016B01A6042614A1027B0AB9
+:103390002614A1037BD12505A1056B4C057BDD25FA
+:1033A00008A1046B4C047B98034472026B4C027BB3
+:1033B0000520036B4C037B072601A49803D605EE7A
+:1033C00072046B4F056B4F44204F032785854D2FAB
+:1033D0001ECD74AE054B004B9A144890035515480A
+:1033E0009103551648920355174893035510488C7E
+:1033F000035511488D035512488E035513488F030A
+:10340000559B036B026B016B4F888888888881FB12
+:10341000004835F7B7F8B7F9B74FFAB78185858512
+:1034200085017B9AB8FACD0CAE0DAD037B9B016B89
+:1034300001A60426027B04264A037BEE26027B06B5
+:1034400026037B026B4A027B036B0F48C60A209A55
+:10345000B8FACD5F38AD047B9B026B32A6016B03DB
+:103460006B4F8888888881E7FBCDFBB700A94105B1
+:10347000AB8184848484848484849AB8FACD04AE35
+:10348000FB004835FA15F7B7F8B7F9B74FFAB703A5
+:10349000AA3FA4087B9B056B0448C6066B0548C67B
+:1034A000076B0648C6086B0748C69AB8FACD5FFB9B
+:1034B000004035C4FBCDFBB700A94101AB5BFA145A
+:1034C0004FAD5BB8FACDFBB700A94101AB5BF7B7D5
+:1034D000F8B7F9B74FFA007803559B056B0040C663
+:1034E000066B0140C6076B0240C6086B0340C69AD4
+:1034F000B8FACDB4AEFB008435FA1AFA198FCBCDE9
+:103500005B9B056BB484C6066BB584C6076BB6843B
+:10351000C6086BB784C6888888888888888881E7C9
+:10352000FBCDFBB700A94101AB81848484849AB8A8
+:10353000FACD04AEFB004835FA1411AD5B9B016B6C
+:103540000448C6026B0548C6036B0648C6046B07F1
+:1035500048C69AB8FACD5FFB004035FA1534AD5B2A
+:103560009B016B0040C6026B0140C6036B0240C664
+:10357000046B0340C68888888881858585854F9A35
+:10358000B8FACD04AEFB004835F73FF83FF93FFAF3
+:10359000B730A4FAB6F7B7F8B7F9B74FFAB7047B64
+:1035A0009B016B0448C6026B0548C6036B0648C600
+:1035B000046B0748C69AB8FACD5FFB004035FA1590
+:1035C000E7FBCDFBB700A94101AB5B9B016B004062
+:1035D000C6026B0140C6036B0240C6046B0340C6C3
+:1035E00088888888819A9BFACDFBB740A94100ABB7
+:1035F00081FBB700A94107AB818B4100A9410DAB0D
+:103600005B9AB8FACD30AEFB004235C4FBCDFBB7B8
+:1036100000A94101AB5B46FACD4848127BF73FF861
+:103620003FF93FFA0003359B016B3042C6026B3114
+:1036300042C6036B3242C6046B3342C639C9CC035F
+:103640002506E172FBD69202AEFCB70D7BFBB70CF0
+:103650007B066B04AB067B67AD5C012406EB721046
+:10366000EE72117BE7FBCD6FAD5B9BAF2504A10B29
+:103670006B4C0B7BFBC792FCBF85FB0032FBD692E9
+:1036800003AEFCBFFBB70EE972410FEB725F4A0756
+:103690006B4C077BFB003B89FB3C022497FCBB0B7C
+:1036A000E07203A6FCBF08CACD5B362505E172FBBC
+:1036B000D69202AEFCB70D7BFBB70C7B0B6B076B96
+:1036C000086B096B0A6B4F7420056B4F106B00A9D8
+:1036D000107B116B04AB117B10CACD10EE72117B05
+:1036E000E7FBCD08CACD5B9B076B4F086BFBD692FF
+:1036F0005C096BFBD69201AEFCB70D7BFBB70C7B74
+:103700000A6BFBC692FCB70D7BFBBF0CEE728B41C4
+:1037100000A2410BA05B89888101A61500DE35144B
+:1037200000C0351300AD351200DE35A7F6CD81841B
+:103730008484848493C7CDAC2510A1046B4C047B96
+:10374000C22508A1056B4C057BDBC7CD03275D03B4
+:10375000264D4101E4724102E472FA265A9059481A
+:10376000062705E67201A601EF72026B5F037B007C
+:10377000C8CD03209AAD0426057B0826047B056B83
+:103780004F036B12E69704E0720FA6046B4FF5250A
+:1037900010A1046B4C047B97AD046B4FDBC7CD933A
+:1037A000C7CDBDC7CD888888888881B3840035B0EF
+:1037B000840035B184003581B0840035B184003592
+:1037C000B28400350DADB284033513ADB284023539
+:1037D00019ADB2840035B384003581B0840035B1B1
+:1037E000840035B2840035B38400358101ADB084E6
+:1037F0000035B1840035B2840135B384003513AD92
+:103800008164A6B0840035B184003526C0CC03ADF8
+:10381000B2840035B384003526C0CD10ADB2840427
+:1038200035B384003581AD840035AE840035AF8476
+:10383000003581AC84003505AD81AC8480350CAD9C
+:1038400007274D26C0CC64A6B0840035B18400356E
+:10385000B2840035B384003526C0CD0AA6B08400FA
+:1038600035B1840035B2841035B384003581848449
+:103870008484BB030135B8FACD5FFB008435F71AA9
+:10388000E7FBCDFBB700A94101AB5B016B0084C630
+:10389000026B0184C6036B0284C6046B0384C68872
+:1038A0008888888184848484B8FACD5FFB0084355D
+:1038B000F71BE7FBCDFBB700A94101AB5B016B0038
+:1038C00084C6026B0184C6036B0284C6046B038446
+:1038D000C68888888881B8FACD78AEFB00843581A7
+:1038E000E7FBCDFBB700A94101AB81848484841040
+:1038F000ADFA1D09AD5B17ADFA1C10AD5B016B781D
+:1039000084C6026B7984C6036B7A84C6046B7B849D
+:10391000C68888888881D7FACD9802CE9902C68158
+:1039200084848484A084C7017BA184C7027BA28491
+:10393000C7037BA384C7047B036B80AA037B046B50
+:1039400006AA047B0C20BEFBCDFBB700A94101AB4E
+:103950005BF91E34AD112400A29802C606A099029C
+:10396000C60C274C03C6222502A19702C629264A67
+:103970004B03C6036B80A4037B046B4F016BA084D5
+:10398000C6026BA184C6036BA284C6046BA384C663
+:103990006C2502A19702C673264A4B03C60D274A1F
+:1039A0004C03C6B8FACD78AEFB008435F71CE7FBB4
+:1039B000CDFBB700A94101AB5B016B7884C6026BFC
+:1039C0007984C6036B7A84C6046B7B84C648830003
+:1039D00035498300354A8300354B830F3510264A1D
+:1039E0002483003525830035268300352783003561
+:1039F0004D274B03C6B8FACDA4AEFB008435F91EA3
+:103A0000F73FF83FF91FE1C6CD142603A1042701B3
+:103A1000A19702C61F264A4B03C6B8FACDA0AEFB3B
+:103A2000008435F71FF93FFA3F46FACD10A6D7FAC2
+:103A3000CD5C012402AB3303CE3403C60B20C0039C
+:103A4000CEC103C62A264A0B2702A04B03C6C703D2
+:103A50005F725DC6CC03274D03C608274C03C6889A
+:103A60008888888184848484A084C7017BA184C7DA
+:103A7000027BA284C7037BA384C7047BBEFBCDFB70
+:103A8000B700A94101AB5BF91ED7FACD9802CE99D8
+:103A900002C6036B80A4037B046B4F1F2502A19712
+:103AA00002C626264A4B03C6036B7FA4037B016B29
+:103AB000A084C6026BA184C6036BA284C6046BA358
+:103AC00084C65A264C03C65F202483003525830014
+:103AD000352683023527830035A0840035A1840074
+:103AE00035A2840035A3840035A4840035A5840064
+:103AF00035A6840035A7840035B8FACD78AEFB0032
+:103B00008435F71DE7FBCDFBB700A94101AB5B0195
+:103B10006B7884C6026B7984C6036B7A84C6046BA7
+:103B20007B84C6C703C75F264C03CE64264D03C6FD
+:103B30008888888881E7FBCDFBB700A94101AB816C
+:103B4000848484840040C7017B0140C7027B02401B
+:103B5000C7037B0340C7047BB8FACD5FFB00403549
+:103B6000F71E23AD5BB8FACD1CAEFB004035F7B7AE
+:103B7000F8B7F9B74FFAB713B614201C4000351D3B
+:103B80004000351E4000351F400035046B02AA047A
+:103B90007B1826123D046B40AA047B06263803C618
+:103BA000046B10AA047B06263703C6016B026B0365
+:103BB0006B4F046B01A6B8FACDB4AEFB008435FAA6
+:103BC00018F918C1C4CD5B016BB484C6026BB5840F
+:103BD000C6036BB684C6046BB784C626C0CDFAA6EE
+:103BE0001445003515450035164500351745023595
+:103BF00088888888810140003502400035034000F4
+:103C00003581848484844D035F72B8FACDB4AEFBF1
+:103C1000008435F919E7FBCDFBB700A94101AB5B87
+:103C2000016BB484C6026BB584C6036BB684C6044C
+:103C30006BB784C61445003515450035164500356B
+:103C4000174501350040003547AD004080354DAD8A
+:103C50008888888881B8FACDB4AEFB008435F91E17
+:103C600081AC840035AD840035AE84003581FBB76E
+:103C700000A94105AB81FBB700A94101AB81FBB7AE
+:103C800000A94109AB810088003501880035028810
+:103C9000003581F914FA14F73FF83FF9B701A4F998
+:103CA000B6FAB7F8A4FAB6D1F9CD08AE905FC6035C
+:103CB000C6818B4100A9410EAB5B28AD038801355D
+:103CC00008880035098830350A8800350B880035AA
+:103CD000B8FACD0CAEFB008835FA1EF7B7F8B7F985
+:103CE000B74FFAB707A40D7B18880035198800353F
+:103CF0001A88FF351B88FF35148800351588003574
+:103D00001688FF351788FF351088003511885F3514
+:103D10001288FF351388FF356EC3CD03880035A0A8
+:103D2000C3CDF71EE7FBCD83C3CD5B0C2621A110CD
+:103D3000209084C7057B9184C7067B9284C7077B4C
+:103D40009384C7087BB8FACD8BC3CD5BC4FBCD830E
+:103D5000C3CD5BC4FBCD7BC3CD5BF71EE7FBCD8B37
+:103D6000C3CD5B056BBFA4057B066BC3A4067B07B5
+:103D70006B086B4F9FFBCD10A68BC3CD5B9FFBCD1C
+:103D800010A67BC3CD5B096B0A6B0B6B4F0C6B3CB6
+:103D9000A60B2082FBCD04A67BC3CD5B0B2455FB79
+:103DA000CD61AEFB00C135E7FBCD7BC3CD5B096BBD
+:103DB0000A6B0B6B4F0C6B3CA4087B3E20096B4FCE
+:103DC0000A6B04A60B6B0C6B4F0E2430A1C803C604
+:103DD000056B9484C6066B9584C6076B9684C608EB
+:103DE0006B9784C6016B026B4F016B9884C6026BA4
+:103DF0009984C6036B9A84C6046B9B84C6A0C3CD0A
+:103E0000F7B760AAF7B6E7FBCD83C3CD5BD1C2CCD1
+:103E1000032422A1C803C6016BB484C6026BB58417
+:103E2000C6036BB684C6046BB784C693C3CDAF8498
+:103E30002035B8FACD7CAEFB008435F714F81AF8BB
+:103E4000104FC3CD0920F81AF8104FC3CD1220F837
+:103E5000104FC3CD19204FC3CD272017274A112754
+:103E60004A0D274A0B274A0D7B93C3CDAF840035FB
+:103E70007C8400357D8400357E8400357F84003568
+:103E80006EC3CD03880035D8C0CD212520A1C8033D
+:103E9000C68B4100A2410CA05B89883C00000081D8
+:103EA000AC840035AD840035AE840035AF84003578
+:103EB0007C8400357D8400357E8400357F84003528
+:103EC00000880035018800350288003503880035F8
+:103ED000A8AD022520A1C803C64B035F72B9035FDA
+:103EE000728184848484B484C7017BB584C7027BD7
+:103EF000B684C7037BB784C7047BB8FACDB4AEFBE6
+:103F0000008435F81CE7FBCDFBB700A94101AB5B92
+:103F1000016BB484C6026BB584C6036BB684C60459
+:103F20006BB784C68888888881E7FBCDFBB700A97A
+:103F30004103ABB8FACC5FFB008435F70030350C99
+:103F4000AD5BB8FACD5FFB008435F71AF73F1BADC8
+:103F50005B81F73FE7FBCDFBB700A94103ABB8FAA4
+:103F6000CC5FFB008435F7180AAD5BB8FACD5FFB78
+:103F700000843515AD5B818590859001EE72027BE2
+:103F8000052005EE72067B072405E2729F06E072AB
+:103F90008988818590859005EE72067B052001EE6B
+:103FA00072027B072405E2729F06E0728988818491
+:103FB00084E926FBB6ED265D016B00A2FBB7017B11
+:103FC000026B01A097027B9D012001EF72026B063C
+:103FD000FACD03AE90FE3F5F88888141204035488E
+:103FE000205F7246205F724520C701A60220D8A636
+:103FF000042420A1C803C6819AB203CFB303C79B90
+:1040000000000000000000000000000000000000B0
+:1040100000000000000000000000000000000000A0
+:104020000000000000000000000000000000000090
+:104030000000000000000000000000000000000080
+:1040400000000000814F13B75320C6502012728128
+:1040500001A603274DB61DCD88A612B75320C65022
+:104060002015728101A603274DB61DCD88A68101BA
+:10407000A603274D131ECD41A68101A603274DFEA1
+:104080001DCD86025F725020203552200935814DAA
+:10409000B61DCD88A6502020355320C7C5204F50CF
+:1040A000201272EC2506E172016B4C017BD42613C1
+:1040B000AD9803D601EE720D204F172760A1027B49
+:1040C00026C0CD05A6EC262BAD037BF2264DB61DF2
+:1040D000CD88A68185858501A606274D131ECD02B4
+:1040E0007B08264DFE1DCD86025F72502020355282
+:1040F000200935888988814DB61DCD88A64F1ECCF4
+:104100004F26C0CD05A6952506E172017B9803D701
+:104110005320C6016B4C9F01EE720D2011E7532016
+:10412000C69707EB724A016B4C017B112509A1066A
+:104130007B502022350426FFB1FFBF900B26FEB333
+:10414000FE3F01E6724A5A01264D5F067B5020155C
+:10415000720426FFB1FFBF900B26FEB3FE3F01E6BF
+:10416000725A012402A05F1A2502A1067BBA2672A8
+:10417000AD5020157204264A067BC7264D90AD012E
+:10418000AA027BD0264D84AD26C0CD05A6DA260333
+:104190001FCD502020355320C7037B02205280AE14
+:1041A000037B072680A1067BF526031FCD8185852D
+:1041B0008501A606274DC9AD027B07264DBBAD86FE
+:1041C000025F72016B4F50202035522009358889DB
+:1041D00088814F5020243526C0CD05A68101A60335
+:1041E000274DB7AD82A6502021355320C7814F26D9
+:1041F000C0CD05A68101A603274DCFAD81A65020D5
+:10420000293581854F86025F72818501A686025F0E
+:1042100072502022350C86020D72052601E17201D2
+:10422000E472FA278602C68F01208881854F818536
+:1042300001A650200535082701E17201E4725120E2
+:10424000C6FA51200F729D0120880218CCF5B545A1
+:10425000F4B445F33FF23FF13FF03FEF3FEE3FF65E
+:104260002538A3F6BEB66FF63C04200218CDF4251F
+:1042700040A1F6B6B66FF63CF6BE0820172539A168
+:10428000F6B6B6E780A6F63CF6BE8106FCCDFBB7D7
+:1042900000A941C6AB5204AE8106FCCDFBB700A914
+:1042A00041CEAB5204AE8106FCCDFBB700A941FA6A
+:1042B000AB5204AE81E9F9CDFBB700A941E6AB52A0
+:1042C00004AE81FBB700A94101AB81F7B7F8B7F99C
+:1042D000B74FFAB7B6E681B8FACDFBB700A9410DE2
+:1042E000AB810FFBCDFBB700A94111AB81FBB70040
+:1042F000A94119AB81FBB700A94105AB81FBB70010
+:10430000A94115AB81E7FBCDFBB700A9411DAB81EE
+:10431000FBB701A94106AB5204AE81FBB700A9412E
+:1043200009AB81E7FBCDFBB700A94121AB818B41F4
+:1043300000A94125AB5BF63F2FFACDB0AEE7FBCD30
+:10434000FBB700A94111AB5B2FFACDACAEE7FBCDBB
+:104350005CAD5B2FFACDA8AEE7FBCD57AD5B2FFA76
+:10436000CDA4AE36AD5B2FFACDA0AE5CAD5BC61B67
+:10437000CC032450A1256B4C257B1D6B0D7B1E6B44
+:104380000E7B1F6B0F7B206B107B216B1D7B226BC9
+:104390001E7B236B1F7B246B207BB8FACDFC1CCDCE
+:1043A0005BC4FBCDDE1CCD5B46FACD1EA6D31CCD77
+:1043B0005BB8FACDDE1CCD5B7AFACD02A6D31CCD5C
+:1043C0005B196B157B1A6B167B1B6B177B1C6B18B1
+:1043D0007B116B197B126B1A7B136B1B7B146B1C91
+:1043E0007B1F1DCD5B0FFBCDFEAEFB0017350FFB1A
+:1043F000CDE61CCD257B141DCD5B0FFBCDDE1CCD8A
+:104400005BC4FBCD041DCD5B46FACD05A6F11CCDEA
+:104410005BB8FACD041DCD5B7AFACD1BA6F11CCD9D
+:104420005BB8FACDDE1CCD5B06FCCD0C1DCD5B066A
+:10443000FCCDFC1CCD5BD31CCD5B256B3CA6D31AFD
+:10444000CC03243CA1256B4C257B1D6B0D7B1E6B87
+:104450000E7B1F6B0F7B206B107B216B1D7B226BF8
+:104460001E7B236B1F7B246B207BB8FACDFC1CCDFD
+:104470005BC4FBCDDE1CCD5B46FACD1EA6D31CCDA6
+:104480005BB8FACDDE1CCD5B7AFACD02A6D31CCD8B
+:104490005B196B157B1A6B167B1B6B177B1C6B18E0
+:1044A0007B116B197B126B1A7B136B1B7B146B1CC0
+:1044B0007B1F1DCD5B0FFBCDFAAEFB0017350FFB4D
+:1044C000CDE61CCD257B141DCD5B0FFBCD041DCD92
+:1044D0005BC4FBCD361DCD5B46FACD05A6F11CCDE8
+:1044E0005BB8FACD361DCD5B7AFACD1BA6F11CCD9B
+:1044F0005BB8FACD041DCD5BC4FBCDDE1CCD5B32B9
+:10450000FBCDFBB700A94121AB5BC4FBCD0C1DCD9E
+:104510005BE7FBCDFC1CCD5BB8FACDDE1CCD5B327E
+:10452000FBCD0C1DCD5BE7FBCDFC1CCD5B256B28CB
+:10453000A6031ACC032428A1256B4C257B1D6B0DEB
+:104540007B1E6B0E7B1F6B0F7B206B107B216B1D0B
+:104550007B226B1E7B236B1F7B246B207BB8FACDE9
+:10456000FC1CCD5BC4FBCDDE1CCD5B46FACD1EA68C
+:10457000D31CCD5BB8FACDDE1CCD5B7AFACD02A69A
+:10458000D31CCD5B196B157B1A6B167B1B6B177BD2
+:104590001C6B187B116B197B126B1A7B136B1B7BCB
+:1045A000146B1C7B1F1DCD5B0FFBCDF6AEFB001704
+:1045B000350FFBCDE61CCD257B141DCD5B0FFBCD50
+:1045C000DE1CCD5BC4FBCD041DCD5B46FACD05A63C
+:1045D000F11CCD5BB8FACD041DCD5B7AFACD1BA6DC
+:1045E000F11CCD5BB8FACDDE1CCD5B06FCCD0C1DFD
+:1045F000CD5B06FCCDFC1CCD5BD31CCD5B256B14C9
+:10460000A61919CC032414A1256B4C257B1D6B0D19
+:104610007B1E6B0E7B1F6B0F7B206B107B216B1D3A
+:104620007B226B1E7B236B1F7B246B207BB8FACD18
+:10463000FC1CCD5BC4FBCDDE1CCD5B46FACD1EA6BB
+:10464000D31CCD5BB8FACDDE1CCD5B7AFACD02A6C9
+:10465000D31CCD5B196B157B1A6B167B1B6B177B01
+:104660001C6B187B116B197B126B1A7B136B1B7BFA
+:10467000146B1C7B1F1DCD5B0FFBCDF2AEFB001737
+:10468000350FFBCDE61CCD257B141DCD5B0FFBCD7F
+:10469000041DCD5BC4FBCD361DCD5B46FACD05A612
+:1046A000F11CCD5BB8FACD361DCD5B7AFACD1BA6D9
+:1046B000F11CCD5BB8FACD041DCD5BC4FBCDDE1C77
+:1046C000CD5B32FBCDFC1CCD5BD31CCD5BB8FACDF2
+:1046D000DE1CCD5B32FBCD0C1DCD5BF733F833F91F
+:1046E00033FA33D31CCD5B256B4F116BB0B6126B15
+:1046F000B1B6136BB2B6146BB3B6196BACB61A6B1A
+:10470000ADB61B6BAEB61C6BAFB6156BA8B6166B11
+:10471000A9B6176BAAB6186BABB6216BA4B6226B01
+:10472000A5B6236BA6B6246BA7B61D6BA0B61E6BF1
+:10473000A1B61F6BA2B6206BA3B6AE2550A1256B08
+:104740004C257B9BFACDE61CCD257BC4FBCDDE1C26
+:10475000CD5BF739F839F939FA38681DCD257B5A20
+:104760001DCD257B4C1DCD257B3E1DCD257BB8FA6F
+:10477000CDDE1CCD5B7AFACD1FA6681DCD257B5AF8
+:104780001DCD257B4C1DCD257B3E1DCD256B10A65B
+:104790009D2510A1256B4C257BBEFBCDE61CCD25B0
+:1047A0007B2A1DCD9703AB9F585825EE72BEFBCDDB
+:1047B000E61CCD257BE0FACD5F90FE0001355FB6AB
+:1047C000E65C5C585825EE72BEFBCDE61CCD257B21
+:1047D00046FACD10A62A1DCD5C585825EE729BFADC
+:1047E000CDE61CCD257B46FACD18A62A1DCD5858FE
+:1047F00025EE72256B4F8B4100A24125A05BD6C1EF
+:1048000062CADCBC1B8FA1EBD96E9979825A818474
+:1048100084C9264C056B4A057B016B00A9017B020C
+:104820006B01AB027B27AD022640A1F6B6B43C0279
+:1048300024B5B708ABB5B6B6E7F63CF6BEFBC692F4
+:10484000FCB7027BFBB7017B2F208988818484849D
+:10485000B4B7B4B99FB5B7B5BB5208AEF6B7037BD2
+:10486000EF2503E172016B4C9FB6E74602D601EEDD
+:10487000720A204F76ADB4000235B53FF6004035E0
+:10488000F12540A1016B4C9FB6E701EE72027B015E
+:104890006B19A6ED2519A1016B4C9FB6E702E872D2
+:1048A00060FED601EE72016B4F88898881B000C32B
+:1048B00035B100D235B200E135B300F035AC0010AF
+:1048C00035AD003235AE005435AF007635A80098CE
+:1048D00035A900BA35AA00DC35AB00FE35A400EFDF
+:1048E00035A500CD35A600AB35A7008935A00067FA
+:1048F00035A1004535A2002335A3000135F63FB5AB
+:104900003FB43F818484761DCD5DAD5CAE14A60CB2
+:10491000ADDE2505A1026B4C027BEA2504A1016BEB
+:104920004C017B4602D7A0E69701EB724848027B18
+:10493000016B4F026B4F761DCD5417CD36AE05017E
+:10494000C63EAD05013235F02532A1026B4C9F46C3
+:1049500002D7CDA602EE72026B4F88883A15CC289A
+:104960004B1CCACD3E15CCC1A6004B084B87CACD07
+:104970003E15CCE0A6014BE04BBF15CC03267503DA
+:104980005A7281848475030235DF14CDE0A6014B91
+:10499000E04B10274D5FCFCD28277703C57903C69D
+:1049A0003E15CCC1A6004B284B3C15CC014B904B7F
+:1049B0001CCACD0A277703C57903C6C520284B3EFC
+:1049C00015CCE0A6014B904BE2CACD0C277703C56E
+:1049D0007903C6DD20084B3E15CC82A6014B904BD7
+:1049E00076035A72CE277603C612264DE1CDCD3E10
+:1049F00015CCC0A6004B504BB7274D5FCFCDE82656
+:104A00004D9ACBCD03A6262076030A35CB274D5FE2
+:104A1000CFCDD320604B04274D9ACBCD83A618274A
+:104A20004DC3CDCD3E15CC81A6004B604BEC264D41
+:104A30009ACBCD02A643CDCD3E15CCA0A6004B0807
+:104A40004B09274D83CCCD06264D9ACBCD4C8C20DF
+:104A500080A688884F932040A6004BA04B08264D87
+:104A6000F2CBCD9C16CC6016CC03261FA04516CCED
+:104A700003264A2D16CC032620A0E0271EA01116DF
+:104A8000CC03264A60274A392720A0C32720A02C20
+:104A90002720A0302720A09C16CC03268303C6DD48
+:104AA0002060A6004B504B1CCACD032561A1830397
+:104AB000C6122711A17C03C607277A03C681848406
+:104AC0009FAD20A6004B504B0B264D03C6052783F8
+:104AD00003C610204F004BA04B1CCACD03278303F5
+:104AE000C60F267D03C68184848400C0CD06EE7285
+:104AF000077B8303C7037B05201872042701E1723B
+:104B0000F0A48303C6016BF0A4037B0620C7026BED
+:104B100002EA72E0A4037B026B1FA40620C6888809
+:104B200088814442003545420035464200354742BF
+:104B30000035004200350142003502420135034292
+:104B4000013510200042083501420035024201358E
+:104B50000342003512264A9B02C620400035214000
+:104B60000035224000352340013504449E0255059E
+:104B7000449F02550644A002550744A10255004433
+:104B8000A202550144A302550244A402550344A5C0
+:104B9000025581854FF3259B02C1016B4C017B7649
+:104BA00012CD06204F88819A9BFACDF73FF83FF946
+:104BB0003F81F7B7F8B7F9B74FFAB7819A9BFACDAB
+:104BC000FBB701AA4181FBC692FCBFFBB7811DADBB
+:104BD000FA3FFBB701AA3313CC4F29ADFA003D359C
+:104BE00003EE72FBB7027B9B37ADFA000835FBB7CB
+:104BF00002AA4124AA02EE72037B9BD32604E1722F
+:104C000033AD02AA4103AA02EE72037B3213CC0336
+:104C100026016B4A017B9A9BFACDFBB702AA027B65
+:104C200003EE7268AD047B9B1C20016BFFA6046B36
+:104C300080AA047B062602A111B6046B03A60220FB
+:104C400001A6062002A60427153D0C279C02C67962
+:104C5000AD411CAA02EE72037B9B2A14CD4118AA17
+:104C600002EE72037B9B2A14CD4114AA02EE72035A
+:104C70007B9BD72604E1723314CD01AA4113AA020B
+:104C8000EE72037BB627016B4A017B3B14CD10AA61
+:104C900002EE72037B4514CD047B9B1720016BFF52
+:104CA000A6046BCFA602200FA606200CA60820069D
+:104CB000274A052714B6CC2604E1723314CD01AA85
+:104CC000410DAA02EE72037B81858585854C0626FF
+:104CD000016B4A017B3B14CD0CAA02EE72037B46AA
+:104CE000FACD10A64514CD047B9B2220016BFFA6B4
+:104CF000046BFCA60220F0A60620CCA60A20C0A6C3
+:104D00000E200CA612200FA616200CA618274A1754
+:104D1000274A16274A15274A14274A29274A1627B9
+:104D200013B6C92604E1723314CD01AA410BAA02BD
+:104D3000EE72037B6627016B4A017B3B14CD08AA08
+:104D400002EE72037BF7B703AAF7B6F8B7C3AAF768
+:104D5000B7F9B74FFAB7047B9B2520016BFFA60478
+:104D60006B4F01200CA60427153D2A14CD4104AA3F
+:104D700002EE72037B9B2A14CD9B026B03EF7250F1
+:104D8000AA414F5858978888888881E7FBCDA2AE02
+:104D9000FB00023581FBB700A94101AB81F7B7F8F1
+:104DA000B7F9B74FFAB781F7B703A4F7B6F83FF9E9
+:104DB0003FFA3F46FACD18A61DFACD4A81FBB7004F
+:104DC000A94107AB81A2020035A3020035A4021855
+:104DD00035812CAD18A44848484A12B6B8FACD43DC
+:104DE000AD818B4100A9410AAB5BB8FACD0CAEFB9B
+:104DF000008435FA36F936F836F73467AD0A20F70D
+:104E000039F839F939FA3873AD0C2602A111B68494
+:104E1000848484841FDDCD880B7B880B7B880B7B8F
+:104E2000880B7B88067B066B4444067B9FFBCD0288
+:104E3000A66DAD5B0E2602A111B6066B08A6076B28
+:104E40004F086BBBA6096B80A60A6BA2020035A3B4
+:104E5000020035A4026035A50200351820ACA60971
+:104E60006B44A60A6BA2020035A3020035A40262BD
+:104E700035A5020035392010A6076B4F086B5DA6DB
+:104E8000096BC0A60A6BA2020035A3020035A4027A
+:104E90003035A5020035182056A6096B22A60A6BEC
+:104EA000A2020035A3020035A4023135A502003567
+:104EB000742020A6076B4F086B2EA6096BE0A60A8C
+:104EC0006B2F12CDA5020035C411CC20A6076B4F65
+:104ED000086B2BA6096B11A60A6B2F12CDA50280B9
+:104EE00035C411CC20A6076B4F086B1FA6096B4079
+:104EF000A60A6BA2020035A3020035A4021035A554
+:104F0000020035C411CC20A6076B4F086B2EA609F2
+:104F10006BE0A60A6B4F2F12CDA5020035A511CC70
+:104F200003264A8B11CC03264A6A11CC03264A5029
+:104F300011CC03264A6D274A57274A382713B69AB9
+:104F4000B8FACDFBBF84AEFA1AE7FBCD3C12CD5BBD
+:104F50009BBEFBCD3C12CD5BC4FBCD6412CD5B1F71
+:104F600012CD5B4412CD5A01264D5F12B61520FAC0
+:104F700010C4FBCD6412CD5B1F12CD5B4412CD5A21
+:104F800001264D5F12B619264A11B63A20086B4029
+:104F9000AA087B0A6B02AA0A7B0E2602A111B60A96
+:104FA0006B04AA0A7B0627163DB8FACD3C12CD5BEE
+:104FB000F91EF7BFF8B73EA4F8B6F9BFFABF46FA34
+:104FC000CD11A65A12CD1420F714F73FF8B73EA41E
+:104FD000F8B6F93FFA3F46FACD11A65A12CD182776
+:104FE000B803CE056B18A6022014A6062010A6084A
+:104FF0002006274A052714B68B4100A2410AA05B70
+:00000001FF
diff --git a/fs/mpage.c b/fs/mpage.c
index fdfae9fa98c..cc782f9e68d 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -53,6 +53,8 @@ static void mpage_end_io(struct bio *bio, int err)
prefetchw(&bvec->bv_page->flags);
if (bio_data_dir(bio) == READ) {
if (uptodate) {
+ /* FIXME: flush needed to work around cache coherence issues. */
+ flush_dcache_page(page);
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index cb5f0a3f1b0..097be1934ee 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -68,6 +68,25 @@ config ACORN_PARTITION_RISCIX
of machines called RISCiX. If you say 'Y' here, Linux will be able
to read disks partitioned under RISCiX.
+config BLKDEV_PARTITION
+ bool "Blockdev commandline partition support" if PARTITION_ADVANCED
+ default n
+ help
+ Say Y here if you would like to set up partitions for block devices by reading
+ from the kernel command line (kernel boot arguments).
+
+ The format of the partitions on the command line:
+ blkdevparts=<blkdev-def>[;<blkdev-def>]
+ <blkdev-def> := <blkdev-id>:<partdef>[,<partdef>]
+ <partdef> := <size>[@<offset>]
+
+ <blkdev-id> := unique id used to map driver to blockdev name
+ <size> := size in numbers of sectors
+ <offset> := offset in sectors for partition to start at
+
+ Example:
+ blkdevparts=mmc0:1024@0,524288@1024;mmc1:8192@0,8192@8192
+
config OSF_PARTITION
bool "Alpha OSF partition support" if PARTITION_ADVANCED
default y if ALPHA
diff --git a/fs/partitions/Makefile b/fs/partitions/Makefile
index 03af8eac51d..48b216c53db 100644
--- a/fs/partitions/Makefile
+++ b/fs/partitions/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_BLOCK) := check.o
obj-$(CONFIG_ACORN_PARTITION) += acorn.o
obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_BLKDEV_PARTITION) += blkdev_parts.o
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
diff --git a/fs/partitions/blkdev_parts.c b/fs/partitions/blkdev_parts.c
new file mode 100755
index 00000000000..030565b7ce7
--- /dev/null
+++ b/fs/partitions/blkdev_parts.c
@@ -0,0 +1,127 @@
+/*
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ulf Hansson <ulf.hansson@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Create partitions for block devices by reading from the kernel
+ * command line (kernel boot arguments).
+ *
+ */
+
+#include "check.h"
+#include "blkdev_parts.h"
+
+static char *cmdline;
+
+/*
+ * This is the handler for our kernel commandline parameter,
+ * called from main.c::checksetup().
+ * Note that we cannot kmalloc() anything yet, so we only save
+ * the commandline for later processing.
+ */
+static int cmdline_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+__setup("blkdevparts=", cmdline_setup);
+
+/* Parse for a matching blkdev-id and return pointer to partdef */
+static char *parse_blkdev_id(char *blkdev_name)
+{
+ int blkdev_id_len;
+ char *p, *blkdev_id;
+
+ /* Start parsing for a matching blkdev-id */
+ p = blkdev_id = cmdline;
+ while (blkdev_id != NULL) {
+
+ /* Find the end of the blkdev-id string */
+ p = strchr(blkdev_id, ':');
+ if (p == NULL)
+ return NULL;
+
+ /* Check if we found a matching blkdev-id */
+ blkdev_id_len = p - blkdev_id;
+ if (strlen(blkdev_name) == blkdev_id_len) {
+ if (strncmp(blkdev_name, blkdev_id, blkdev_id_len) == 0)
+ return p;
+ }
+
+ /* Move to next blkdev-id string if there is one */
+ blkdev_id = strchr(p, ';');
+ if (blkdev_id != NULL)
+ blkdev_id++;
+ }
+ return NULL;
+}
+
+static int parse_partdef(char **part, struct parsed_partitions *state, int part_nbr)
+{
+ sector_t size, offset;
+ char *p = *part;
+
+ /* Skip the beginning "," or ":" */
+ p++;
+
+ /* Fetch and verify size from partdef */
+ size = simple_strtoull(p, &p, 10);
+ if ((size == 0) || (*p != '@'))
+ return 0;
+
+ /* Skip the "@" */
+ p++;
+
+ /* Fetch offset from partdef and check if there are more parts */
+ offset = simple_strtoull(p, &p, 10);
+ if (*p == ',')
+ *part = p;
+ else
+ *part = NULL;
+
+ /* Add partition to state */
+ put_partition(state, part_nbr, offset, size);
+ printk(KERN_INFO "\nPartition: size=%llu, offset=%llu\n",
+ (unsigned long long) size,
+ (unsigned long long) offset);
+ return 1;
+}
+
+static int parse_blkdev_parts(char *blkdev_name, struct parsed_partitions *state)
+{
+ char *partdef;
+ int part_nbr = 0;
+
+ /* Find partdef */
+ partdef = parse_blkdev_id(blkdev_name);
+
+ /* Add parts */
+ while (partdef != NULL) {
+ /* Find next part and add it to state */
+ part_nbr++;
+ if (!parse_partdef(&partdef, state, part_nbr))
+ return 0;
+ }
+ return part_nbr;
+}
+
+int blkdev_partition(struct parsed_partitions *state)
+{
+ char blkdev_name[BDEVNAME_SIZE];
+
+ /* Check if there are any partitions to handle */
+ if (cmdline == NULL)
+ return 0;
+
+ /* Get the name of the blockdevice we are operating upon */
+ if (bdevname(state->bdev, blkdev_name) == NULL) {
+ printk(KERN_WARNING "Could not get a blkdev name\n");
+ return 0;
+ }
+
+ /* Parse for partitions and add them to the state */
+ return parse_blkdev_parts(blkdev_name, state);
+}
+
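For reference, a minimal user-space sketch (not part of the patch) of how a single <partdef> token such as "524288@1024" decodes under the same rules parse_partdef() applies above; the token and the printed layout are illustrative only.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *p = "524288@1024";
	unsigned long long size, offset;

	size = strtoull(p, &p, 10);		/* size in sectors, must be non-zero */
	if (size == 0 || *p != '@')
		return 1;			/* malformed partdef */
	offset = strtoull(p + 1, NULL, 10);	/* start sector */
	printf("partition: size=%llu offset=%llu\n", size, offset);
	return 0;
}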
diff --git a/fs/partitions/blkdev_parts.h b/fs/partitions/blkdev_parts.h
new file mode 100755
index 00000000000..16d2b571625
--- /dev/null
+++ b/fs/partitions/blkdev_parts.h
@@ -0,0 +1,14 @@
+/*
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ulf Hansson <ulf.hansson@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Create partitions for block devices by reading from the kernel
+ * command line (kernel boot arguments).
+ *
+ */
+
+int blkdev_partition(struct parsed_partitions *state);
+
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index e3c63d1c5e1..c8dc879e26b 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -27,6 +27,7 @@
#include "acorn.h"
#include "amiga.h"
#include "atari.h"
+#include "blkdev_parts.h"
#include "ldm.h"
#include "mac.h"
#include "msdos.h"
@@ -50,6 +51,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
* Probe partition formats with tables at disk address 0
* that also have an ADFS boot block at 0xdc0.
*/
+#ifdef CONFIG_BLKDEV_PARTITION
+ blkdev_partition,
+#endif
#ifdef CONFIG_ACORN_PARTITION_ICS
adfspart_check_ICS,
#endif
diff --git a/include/Kbuild b/include/Kbuild
index 8d226bfa269..506f6d7dba7 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -10,3 +10,4 @@ header-y += video/
header-y += drm/
header-y += xen/
header-y += scsi/
+header-y += trace/
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fcbbe71a3cc..849aec9aba5 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -94,4 +94,64 @@ void amba_release_regions(struct amba_device *);
#define amba_manf(d) AMBA_MANF_BITS((d)->periphid)
#define amba_part(d) AMBA_PART_BITS((d)->periphid)
+#ifdef CONFIG_PM_SLEEP
+extern int amba_pm_prepare(struct device *dev);
+extern void amba_pm_complete(struct device *dev);
+#else
+#define amba_pm_prepare NULL
+#define amba_pm_complete NULL
+#endif
+
+#ifdef CONFIG_SUSPEND
+extern int amba_pm_suspend(struct device *dev);
+extern int amba_pm_suspend_noirq(struct device *dev);
+extern int amba_pm_resume(struct device *dev);
+extern int amba_pm_resume_noirq(struct device *dev);
+#else
+#define amba_pm_suspend NULL
+#define amba_pm_resume NULL
+#define amba_pm_suspend_noirq NULL
+#define amba_pm_resume_noirq NULL
+#endif
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+extern int amba_pm_freeze(struct device *dev);
+extern int amba_pm_freeze_noirq(struct device *dev);
+extern int amba_pm_thaw(struct device *dev);
+extern int amba_pm_thaw_noirq(struct device *dev);
+extern int amba_pm_poweroff(struct device *dev);
+extern int amba_pm_poweroff_noirq(struct device *dev);
+extern int amba_pm_restore(struct device *dev);
+extern int amba_pm_restore_noirq(struct device *dev);
+#else
+#define amba_pm_freeze NULL
+#define amba_pm_thaw NULL
+#define amba_pm_poweroff NULL
+#define amba_pm_restore NULL
+#define amba_pm_freeze_noirq NULL
+#define amba_pm_thaw_noirq NULL
+#define amba_pm_poweroff_noirq NULL
+#define amba_pm_restore_noirq NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define USE_AMBA_PM_SLEEP_OPS \
+ .prepare = amba_pm_prepare, \
+ .complete = amba_pm_complete, \
+ .suspend = amba_pm_suspend, \
+ .resume = amba_pm_resume, \
+ .freeze = amba_pm_freeze, \
+ .thaw = amba_pm_thaw, \
+ .poweroff = amba_pm_poweroff, \
+ .restore = amba_pm_restore, \
+ .suspend_noirq = amba_pm_suspend_noirq, \
+ .resume_noirq = amba_pm_resume_noirq, \
+ .freeze_noirq = amba_pm_freeze_noirq, \
+ .thaw_noirq = amba_pm_thaw_noirq, \
+ .poweroff_noirq = amba_pm_poweroff_noirq, \
+ .restore_noirq = amba_pm_restore_noirq,
+#else
+#define USE_AMBA_PM_SLEEP_OPS
+#endif
+
#endif
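As a hedged illustration (not from the patch itself), USE_AMBA_PM_SLEEP_OPS is intended to be expanded inside a dev_pm_ops initializer; the runtime callbacks shown here are placeholders.

#include <linux/pm.h>
#include <linux/amba/bus.h>

static const struct dev_pm_ops amba_pm = {
	.runtime_suspend = NULL,	/* placeholder */
	.runtime_resume  = NULL,	/* placeholder */
	USE_AMBA_PM_SLEEP_OPS		/* expands to the sleep callbacks, or nothing */
};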
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 21114810c7c..84766fd366b 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -6,6 +6,19 @@
#include <linux/mmc/host.h>
+
+/*
+ * These defines are placed here because machine configuration files need
+ * access to them. The ST Micro version does not have ROD and
+ * reuses the voltage registers for direction settings.
+ */
+#define MCI_ST_DATA2DIREN (1 << 2)
+#define MCI_ST_CMDDIREN (1 << 3)
+#define MCI_ST_DATA0DIREN (1 << 4)
+#define MCI_ST_DATA31DIREN (1 << 5)
+#define MCI_ST_FBCLKEN (1 << 7)
+#define MCI_ST_DATA74DIREN (1 << 8)
+
/* Just some dummy forwarding */
struct dma_chan;
@@ -18,7 +31,8 @@ struct dma_chan;
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @vdd_handler: a callback function to translate a MMC_VDD_*
+ * @ios_handler: a callback function to act on specific ios changes,
+ * used, for example, to control a level shifter
* mask into a value to be binary (or set some other custom bits
* in MMCIPWR) or:ed and written into the MMCIPWR register of the
* block. May also control external power based on the power_mode.
@@ -30,6 +44,8 @@ struct dma_chan;
* @cd_invert: true if the gpio_cd pin value is active low
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
+ * @sigdir: a bit field indicating for which signals on the MMC bus the host
+ * should enable signal direction indication.
* @dma_filter: function used to select an appropriate RX and TX
* DMA channel to be used for DMA, if and only if you're deploying the
* generic DMA engine
@@ -45,13 +61,13 @@ struct dma_chan;
struct mmci_platform_data {
unsigned int f_max;
unsigned int ocr_mask;
- u32 (*vdd_handler)(struct device *, unsigned int vdd,
- unsigned char power_mode);
+ int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
+ u32 sigdir;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
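A hypothetical board-file sketch (not from this patch) of how the new ios_handler callback and sigdir field might be wired up; the function body and the flag choice are illustrative.

#include <linux/amba/mmci.h>

static int board_mmc_ios_handler(struct device *dev, struct mmc_ios *ios)
{
	/* e.g. switch an external level shifter based on ios->power_mode */
	return 0;
}

static struct mmci_platform_data board_sdi0_data = {
	.ios_handler	= board_mmc_ios_handler,
	.sigdir		= MCI_ST_CMDDIREN |
			  MCI_ST_DATA0DIREN |
			  MCI_ST_FBCLKEN,
};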
diff --git a/include/linux/boottime.h b/include/linux/boottime.h
new file mode 100644
index 00000000000..9836c5b3175
--- /dev/null
+++ b/include/linux/boottime.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009-2010
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * boottime is a tool for collecting start-up timing
+ * information and can, together with boot loader support,
+ * display a total system start-up time.
+ *
+ */
+
+#ifndef LINUX_BOOTTIME_H
+#define LINUX_BOOTTIME_H
+
+#ifdef CONFIG_BOOTTIME
+#include <linux/kernel.h>
+
+/**
+ * struct boottime_timer - Callbacks for generic timer.
+ * @init: Function to call at boottime initialization
+ * @get_time: Returns the number of us since start-up
+ * Preferably this is based upon a free-running timer.
+ * This is the only required entry.
+ * @finalize: Called before init is executed and boottime is done.
+ */
+struct boottime_timer {
+ int (*init)(void);
+ unsigned long (*get_time)(void);
+ void (*finalize)(void);
+};
+
+/**
+ * boottime_mark_wtime()
+ * Add a sample point with a given time. Useful for adding data collected
+ * by for example a boot loader.
+ * @name: The name of the sample point
+ * @time: The time in us when this point was reached
+ */
+void __init boottime_mark_wtime(char *name, unsigned long time);
+
+/**
+ * boottime_mark()
+ * Add a sample point with the current time.
+ * @name: The name of this sample point
+ */
+void __init boottime_mark(char *name);
+
+/**
+ * boottime_mark_symbolic()
+ * Add a sample point where the name is a function address
+ * and %pF is needed to resolve the correct function name.
+ * @name: function address.
+ */
+void __init boottime_mark_symbolic(void *name);
+
+/**
+ * boottime_activate()
+ * Activates boottime and registers callbacks.
+ * @bt: struct with callbacks.
+ */
+void __ref boottime_activate(struct boottime_timer *bt);
+
+/**
+ * boottime_deactivate()
+ * This function is called when the kernel boot is done.
+ * (before "free init memory" is called)
+ */
+void __init boottime_deactivate(void);
+
+/**
+ * boottime_system_up()
+ * A function is called when the basics of the kernel
+ * is up and running.
+ */
+void __init boottime_system_up(void);
+
+#else
+
+#define boottime_mark_wtime(name, time)
+#define boottime_mark(name)
+#define boottime_mark_symbolic(name)
+#define boottime_activate(bt)
+#define boottime_deactivate()
+#define boottime_system_up()
+#endif
+
+#endif /* LINUX_BOOTTIME_H */
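A hypothetical sketch (not part of the patch) of how a platform might register with the boottime API above; read_us_counter() stands in for whatever free-running microsecond counter the platform provides.

#include <linux/init.h>
#include <linux/boottime.h>

/* stand-in for the platform's free-running microsecond counter */
static unsigned long read_us_counter(void)
{
	return 0;
}

static unsigned long board_boottime_get_time(void)
{
	return read_us_counter();
}

static struct boottime_timer board_boottime_timer = {
	.get_time = board_boottime_get_time,
};

static int __init board_boottime_init(void)
{
	boottime_activate(&board_boottime_timer);
	boottime_mark("board_init_done");
	return 0;
}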
diff --git a/include/linux/cpufreq-dbx500.h b/include/linux/cpufreq-dbx500.h
new file mode 100644
index 00000000000..80d67083e11
--- /dev/null
+++ b/include/linux/cpufreq-dbx500.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+#ifndef __CPUFREQ_DBX500_H
+#define __CPUFREQ_DBX500_H
+
+#include <linux/cpufreq.h>
+
+int dbx500_cpufreq_get_limits(int cpu, int r,
+ unsigned int *min, unsigned int *max);
+
+#endif
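A hedged sketch (not in the patch) of a consumer that reads the current limits and asks cpufreq to apply them; the meaning of the r argument is whatever requirement level the caller tracks, and the zero-on-success return convention is assumed.

#include <linux/cpufreq.h>
#include <linux/cpufreq-dbx500.h>

static void apply_cpu_limits(int cpu, int r)
{
	unsigned int min, max;

	/* assumes a zero return means the limits were filled in */
	if (!dbx500_cpufreq_get_limits(cpu, r, &min, &max))
		cpufreq_update_freq(cpu, min, max);
}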
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6216115c778..1f8e97c2a08 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -199,6 +199,7 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
@@ -336,6 +337,7 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
}
#endif
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
/*********************************************************************
* CPUFREQ DEFAULT GOVERNOR *
diff --git a/include/linux/cyttsp.h b/include/linux/cyttsp.h
new file mode 100755
index 00000000000..38ef1236d7a
--- /dev/null
+++ b/include/linux/cyttsp.h
@@ -0,0 +1,114 @@
+/* Header file for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * include/linux/cyttsp.h
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+#include <linux/input.h>
+
+#ifndef _CYTTSP_H_
+#define _CYTTSP_H_
+
+#include <linux/input.h>
+
+#define CY_SPI_NAME "cyttsp-spi"
+#define CY_I2C_NAME "cyttsp-i2c"
+/* Scan Type selection for finger and/or stylus activation */
+#define CY_SCN_TYP_DFLT 0x01 /* finger only mutual scan */
+/* Active Power state scanning/processing refresh interval */
+#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
+/* touch timeout for the Active power */
+#define CY_TCH_TMOUT_DFLT 0x64 /* ms */
+/* Low Power state scanning/processing refresh interval */
+#define CY_LP_INTRVL_DFLT 0x32 /* ms */
+/*
+ * Defines for Gen2 (Txx2xx); Gen3 (Txx3xx)
+ * use these defines to set cyttsp_platform_data.gen in board config file
+ */
+enum cyttsp_gen {
+ CY_GEN2,
+ CY_GEN3,
+};
+/*
+ * Active distance in pixels for a gesture to be reported
+ * if set to 0, then all gesture movements are reported
+ * Valid range is 0 - 15
+ */
+#define CY_ACT_DIST_DFLT 8
+#define CY_ACT_DIST CY_ACT_DIST_DFLT
+#define CY_ACT_DIST_BITS 0x0F
+/* max retries for read/write ops */
+#define CY_NUM_RETRY 6
+
+enum cyttsp_gest {
+ CY_GEST_GRP_NONE = 0,
+ CY_GEST_GRP1 = 0x10,
+ CY_GEST_GRP2 = 0x20,
+ CY_GEST_GRP3 = 0x40,
+ CY_GEST_GRP4 = 0x80,
+};
+
+enum cyttsp_powerstate {
+ CY_IDLE_STATE, /* IC cannot be reached */
+ CY_READY_STATE, /* pre-operational; ready to go to ACTIVE */
+ CY_ACTIVE_STATE, /* app is running, IC is scanning */
+ CY_LOW_PWR_STATE, /* not currently used */
+ CY_SLEEP_STATE, /* app is running, IC is idle */
+ CY_BL_STATE, /* bootloader is running */
+ CY_LDR_STATE, /* loader is running */
+ CY_SYSINFO_STATE, /* Switching to SysInfo mode */
+ CY_INVALID_STATE /* always last in the list */
+};
+
+struct cyttsp_platform_data {
+ u32 maxx;
+ u32 maxy;
+ u32 flags;
+ enum cyttsp_gen gen;
+ unsigned use_st:1;
+ unsigned use_mt:1;
+ unsigned use_trk_id:1;
+ unsigned use_hndshk:1;
+ unsigned use_timer:1;
+ unsigned use_sleep:1;
+ unsigned use_gestures:1;
+ unsigned use_load_file:1;
+ unsigned use_force_fw_update:1;
+ unsigned use_virtual_keys:1;
+ enum cyttsp_powerstate power_state;
+ u8 gest_set;
+ u8 scn_typ; /* finger and/or stylus scanning */
+ u8 act_intrvl; /* Active refresh interval; ms */
+ u8 tch_tmout; /* Active touch timeout; ms */
+ u8 lp_intrvl; /* Low power refresh interval; ms */
+ int (*wakeup)(void);
+ int (*init)(int on_off);
+ void (*mt_sync)(struct input_dev *);
+ char *name;
+ s16 irq_gpio;
+ s16 rst_gpio;
+ bool invert;
+};
+
+#endif /* _CYTTSP_H_ */
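A hypothetical board sketch (not part of the patch) filling in cyttsp_platform_data for a Gen3 part; every value below is illustrative.

#include <linux/cyttsp.h>

static struct cyttsp_platform_data board_cyttsp_data = {
	.maxx		= 480,
	.maxy		= 854,
	.gen		= CY_GEN3,
	.use_mt		= 1,
	.use_trk_id	= 1,
	.use_sleep	= 1,
	.scn_typ	= CY_SCN_TYP_DFLT,
	.act_intrvl	= CY_ACT_INTRVL_DFLT,
	.tch_tmout	= CY_TCH_TMOUT_DFLT,
	.lp_intrvl	= CY_LP_INTRVL_DFLT,
	.name		= CY_I2C_NAME,
	.irq_gpio	= -1,
	.rst_gpio	= -1,
};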
diff --git a/include/linux/dispdev.h b/include/linux/dispdev.h
new file mode 100644
index 00000000000..cbcf6705150
--- /dev/null
+++ b/include/linux/dispdev.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * ST-Ericsson Display device driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _DISPDEV_H_
+#define _DISPDEV_H_
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#include <video/mcde.h>
+#endif
+
+#define DISPDEV_DEFAULT_DEVICE_PREFIX "disp"
+
+enum dispdev_fmt {
+ DISPDEV_FMT_RGB565,
+ DISPDEV_FMT_RGB888,
+ DISPDEV_FMT_RGBX8888,
+ DISPDEV_FMT_RGBA8888,
+ DISPDEV_FMT_YUV422,
+};
+
+struct dispdev_config {
+ uint16_t format;
+ uint16_t stride;
+ uint16_t x;
+ uint16_t y;
+ uint16_t z;
+ uint16_t width;
+ uint16_t height;
+
+ uint32_t user_flags;
+};
+
+struct dispdev_buffer_info {
+ uint16_t buf_idx;
+ uint16_t display_update;
+ struct dispdev_config buf_cfg;
+};
+
+#define DISPDEV_SET_CONFIG_IOC _IOW('D', 1, struct dispdev_config)
+#define DISPDEV_GET_CONFIG_IOC _IOR('D', 2, struct dispdev_config)
+#define DISPDEV_REGISTER_BUFFER_IOC _IO('D', 3)
+#define DISPDEV_UNREGISTER_BUFFER_IOC _IO('D', 4)
+#define DISPDEV_QUEUE_BUFFER_IOC _IOW('D', 5, struct dispdev_buffer_info)
+#define DISPDEV_DEQUEUE_BUFFER_IOC _IO('D', 6)
+
+#ifdef __KERNEL__
+
+int dispdev_create(struct mcde_display_device *ddev, bool overlay,
+ struct mcde_overlay *parent_ovly);
+void dispdev_destroy(struct mcde_display_device *ddev);
+
+#endif /* __KERNEL__ */
+
+#endif /* _DISPDEV_H_ */
+
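A hypothetical user-space sketch (not part of the patch) of driving the ioctl interface above; the device node name and geometry are illustrative and error handling is omitted.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/dispdev.h>

int main(void)
{
	int fd = open("/dev/disp0", O_RDWR);	/* assumed node name */
	struct dispdev_config cfg = {
		.format = DISPDEV_FMT_RGB565,
		.width  = 864,
		.height = 480,
		.stride = 864 * 2,
	};

	ioctl(fd, DISPDEV_SET_CONFIG_IOC, &cfg);
	return 0;
}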
diff --git a/arch/arm/plat-nomadik/include/plat/gpio-nomadik.h b/include/linux/gpio/nomadik.h
index 9605bf227df..3e8b7f16fb7 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio-nomadik.h
+++ b/include/linux/gpio/nomadik.h
@@ -29,6 +29,7 @@
#define NMK_GPIO_SLPC 0x1c
#define NMK_GPIO_AFSLA 0x20
#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
#define NMK_GPIO_RIMSC 0x40
#define NMK_GPIO_FIMSC 0x44
diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h
new file mode 100644
index 00000000000..ba4c116f4b9
--- /dev/null
+++ b/include/linux/hwmem.h
@@ -0,0 +1,593 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _HWMEM_H_
+#define _HWMEM_H_
+
+#include <linux/types.h>
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <sys/types.h>
+#else
+#include <linux/mm_types.h>
+#endif
+
+#define HWMEM_DEFAULT_DEVICE_NAME "hwmem"
+
+/**
+ * @brief Flags defining behavior of allocation
+ */
+enum hwmem_alloc_flags {
+ /**
+ * @brief Buffered
+ */
+ HWMEM_ALLOC_HINT_WRITE_COMBINE = (1 << 0),
+ /**
+ * @brief Non-buffered
+ */
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE = (1 << 1),
+ /**
+ * @brief Cached
+ */
+ HWMEM_ALLOC_HINT_CACHED = (1 << 2),
+ /**
+ * @brief Uncached
+ */
+ HWMEM_ALLOC_HINT_UNCACHED = (1 << 3),
+ /**
+ * @brief Write back
+ */
+ HWMEM_ALLOC_HINT_CACHE_WB = (1 << 4),
+ /**
+ * @brief Write through
+ */
+ HWMEM_ALLOC_HINT_CACHE_WT = (1 << 5),
+ /**
+ * @brief No alloc on write
+ */
+ HWMEM_ALLOC_HINT_CACHE_NAOW = (1 << 6),
+ /**
+ * @brief Alloc on write
+ */
+ HWMEM_ALLOC_HINT_CACHE_AOW = (1 << 7),
+ /**
+ * @brief Inner and outer cache
+ */
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE = (1 << 8),
+ /**
+ * @brief Inner cache only
+ */
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY = (1 << 9),
+ /**
+ * @brief Reserved for use by the cache handler integration
+ */
+ HWMEM_ALLOC_RESERVED_CHI = (1 << 31),
+};
+
+/**
+ * @brief Flags defining buffer access mode.
+ */
+enum hwmem_access {
+ /**
+ * @brief Buffer will be read from.
+ */
+ HWMEM_ACCESS_READ = (1 << 0),
+ /**
+ * @brief Buffer will be written to.
+ */
+ HWMEM_ACCESS_WRITE = (1 << 1),
+ /**
+ * @brief Buffer will be imported.
+ */
+ HWMEM_ACCESS_IMPORT = (1 << 2),
+};
+
+/**
+ * @brief Values defining memory types.
+ */
+enum hwmem_mem_type {
+ /**
+ * @brief Scattered system memory.
+ */
+ HWMEM_MEM_SCATTERED_SYS,
+ /**
+ * @brief Contiguous system memory.
+ */
+ HWMEM_MEM_CONTIGUOUS_SYS,
+};
+
+/* User space API */
+
+/**
+ * @see struct hwmem_region.
+ */
+struct hwmem_region_us {
+ __u32 offset;
+ __u32 count;
+ __u32 start;
+ __u32 end;
+ __u32 size;
+};
+
+/**
+ * @brief Alloc request data.
+ */
+struct hwmem_alloc_request {
+ /**
+ * @brief [in] Size of requested allocation in bytes. Size will be
+ * aligned to PAGE_SIZE bytes.
+ */
+ __u32 size;
+ /**
+ * @brief [in] Flags describing requested allocation options.
+ */
+ __u32 flags; /* enum hwmem_alloc_flags */
+ /**
+ * @brief [in] Default access rights for buffer.
+ */
+ __u32 default_access; /* enum hwmem_access */
+ /**
+ * @brief [in] Memory type of the buffer.
+ */
+ __u32 mem_type; /* enum hwmem_mem_type */
+};
+
+/**
+ * @brief Set domain request data.
+ */
+struct hwmem_set_domain_request {
+ /**
+ * @brief [in] Identifier of buffer to be prepared. If 0 is specified
+ * the buffer associated with the current file instance will be used.
+ */
+ __s32 id;
+ /**
+ * @brief [in] Flags specifying access mode of the operation.
+ *
+ * One of HWMEM_ACCESS_READ and HWMEM_ACCESS_WRITE is required.
+ * For details, @see enum hwmem_access.
+ */
+ __u32 access; /* enum hwmem_access */
+ /**
+ * @brief [in] The region of bytes to be prepared.
+ *
+ * For details, @see struct hwmem_region.
+ */
+ struct hwmem_region_us region;
+};
+
+/**
+ * @brief Pin request data.
+ */
+struct hwmem_pin_request {
+ /**
+ * @brief [in] Identifier of buffer to be pinned. If 0 is specified,
+ * the buffer associated with the current file instance will be used.
+ */
+ __s32 id;
+ /**
+ * @brief [out] Physical address of first word in buffer.
+ */
+ __u32 phys_addr;
+};
+
+/**
+ * @brief Set access rights request data.
+ */
+struct hwmem_set_access_request {
+ /**
+ * @brief [in] Identifier of buffer to set access rights for. If 0 is
+ * specified, the buffer associated with the current file instance will
+ * be used.
+ */
+ __s32 id;
+ /**
+ * @param access Access value indicating what is allowed.
+ */
+ __u32 access; /* enum hwmem_access */
+ /**
+ * @param pid Process ID to set rights for.
+ */
+ pid_t pid;
+};
+
+/**
+ * @brief Get info request data.
+ */
+struct hwmem_get_info_request {
+ /**
+ * @brief [in] Identifier of buffer to get info about. If 0 is specified,
+ * the buffer associated with the current file instance will be used.
+ */
+ __s32 id;
+ /**
+ * @brief [out] Size in bytes of buffer.
+ */
+ __u32 size;
+ /**
+ * @brief [out] Memory type of buffer.
+ */
+ __u32 mem_type; /* enum hwmem_mem_type */
+ /**
+ * @brief [out] Access rights for buffer.
+ */
+ __u32 access; /* enum hwmem_access */
+};
+
+/**
+ * @brief Allocates <size> number of bytes and returns a buffer identifier.
+ *
+ * Input is a pointer to a hwmem_alloc_request struct.
+ *
+ * @return A buffer identifier on success, or a negative error code.
+ */
+#define HWMEM_ALLOC_IOC _IOW('W', 1, struct hwmem_alloc_request)
+
+/**
+ * @brief Allocates <size> number of bytes and associates the created buffer
+ * with the current file instance.
+ *
+ * If the current file instance is already associated with a buffer the call
+ * will fail. Buffers referenced through files instances shall not be released
+ * with HWMEM_RELEASE_IOC, instead the file instance shall be closed.
+ *
+ * Input is a pointer to a hwmem_alloc_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_ALLOC_FD_IOC _IOW('W', 2, struct hwmem_alloc_request)
+
+/**
+ * @brief Releases buffer.
+ *
+ * Buffers are reference counted and will not be destroyed until the last
+ * reference is released. Buffers allocated with ALLOC_FD_IOC shall not be
+ * released with this IOC, @see HWMEM_ALLOC_FD_IOC.
+ *
+ * Input is the buffer identifier.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_RELEASE_IOC _IO('W', 3)
+
+/**
+ * Memory Mapping
+ *
+ * To map a hwmem buffer, mmap the hwmem fd and supply the buffer identifier
+ * as the offset. If the buffer is linked to the fd and thus has no buffer
+ * identifier, supply 0 as the offset. Note that the offset feature of mmap is
+ * disabled in both cases; you can only mmap starting at position 0.
+ */
+
+/**
+ * @brief Prepares the buffer for CPU access.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_CPU_DOMAIN_IOC _IOW('W', 4, struct hwmem_set_domain_request)
+
+/**
+ * DEPRECATED: Set sync domain from driver instead!
+ *
+ * @brief Prepares the buffer for access by any DMA hardware.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_SYNC_DOMAIN_IOC _IOW('W', 5, struct hwmem_set_domain_request)
+
+/**
+ * DEPRECATED: Pin from driver instead!
+ *
+ * @brief Pins the buffer.
+ *
+ * Input is a pointer to a hwmem_pin_request struct. Only contiguous buffers
+ * can be pinned from user space.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_PIN_IOC _IOWR('W', 6, struct hwmem_pin_request)
+
+/**
+ * DEPRECATED: Unpin from driver instead!
+ *
+ * @brief Unpins the buffer.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_UNPIN_IOC _IO('W', 7)
+
+/**
+ * @brief Set access rights for buffer.
+ *
+ * Input is a pointer to a hwmem_set_access_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_ACCESS_IOC _IOW('W', 8, struct hwmem_set_access_request)
+
+/**
+ * @brief Get buffer information.
+ *
+ * Input is a pointer to a hwmem_get_info_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_GET_INFO_IOC _IOWR('W', 9, struct hwmem_get_info_request)
+
+/**
+ * @brief Export the buffer identifier for use in another process.
+ *
+ * The global name will not increase the buffers reference count and will
+ * therefore not keep the buffer alive.
+ *
+ * Input is the buffer identifier. If 0 is specified the buffer associated with
+ * the current file instance will be exported.
+ *
+ * @return A global buffer name on success, or a negative error code.
+ */
+#define HWMEM_EXPORT_IOC _IO('W', 10)
+
+/**
+ * @brief Import a buffer to allow local access to the buffer.
+ *
+ * Input is the buffer's global name.
+ *
+ * @return The imported buffer's identifier on success, or a negative error
+ * code.
+ */
+#define HWMEM_IMPORT_IOC _IO('W', 11)
+
+/**
+ * @brief Import a buffer to allow local access to the buffer using the current
+ * fd.
+ *
+ * Input is the buffer's global name.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_IMPORT_FD_IOC _IO('W', 12)
+
+#ifdef __KERNEL__
+
+/* Kernel API */
+
+/**
+ * @brief Values defining memory domain.
+ */
+enum hwmem_domain {
+ /**
+ * @brief This value specifies the neutral memory domain. Setting this
+ * domain will syncronize all supported memory domains.
+ */
+ HWMEM_DOMAIN_SYNC = 0,
+ /**
+ * @brief This value specifies the CPU memory domain.
+ */
+ HWMEM_DOMAIN_CPU,
+};
+
+struct hwmem_alloc;
+
+/**
+ * @brief Structure defining a region of a memory buffer.
+ *
+ * A buffer is defined to contain a number of equally sized blocks. Each block
+ * has a part of it included in the region [<start>-<end>). That is
+ * <end>-<start> bytes. Each block is <size> bytes long. Total number of bytes
+ * in the region is (<end> - <start>) * <count>. First byte of the region is
+ * <offset> + <start> bytes into the buffer.
+ *
+ * Here's an example of a region in a graphics buffer (X = buffer, R = region):
+ *
+ * XXXXXXXXXXXXXXXXXXXX \
+ * XXXXXXXXXXXXXXXXXXXX |-- offset = 60
+ * XXXXXXXXXXXXXXXXXXXX /
+ * XXRRRRRRRRXXXXXXXXXX \
+ * XXRRRRRRRRXXXXXXXXXX |-- count = 4
+ * XXRRRRRRRRXXXXXXXXXX |
+ * XXRRRRRRRRXXXXXXXXXX /
+ * XXXXXXXXXXXXXXXXXXXX
+ * --| start = 2
+ * ----------| end = 10
+ * --------------------| size = 20
+ */
+struct hwmem_region {
+ /**
+ * @brief The first block's offset from beginning of buffer.
+ */
+ size_t offset;
+ /**
+ * @brief The number of blocks included in this region.
+ */
+ size_t count;
+ /**
+ * @brief The index of the first byte included in this block.
+ */
+ size_t start;
+ /**
+ * @brief The index of the last byte included in this block plus one.
+ */
+ size_t end;
+ /**
+ * @brief The size in bytes of each block.
+ */
+ size_t size;
+};
+
+struct hwmem_mem_chunk {
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
+ * @brief Allocates <size> number of bytes.
+ *
+ * @param size Number of bytes to allocate. All allocations are page aligned.
+ * @param flags Allocation options.
+ * @param def_access Default buffer access rights.
+ * @param mem_type Memory type.
+ *
+ * @return Pointer to allocation, or a negative error code.
+ */
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type);
+
+/**
+ * @brief Release a previously allocated buffer.
+ * When last reference is released, the buffer will be freed.
+ *
+ * @param alloc Buffer to be released.
+ */
+void hwmem_release(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Set the buffer domain and prepare it for access.
+ *
+ * @param alloc Buffer to be prepared.
+ * @param access Flags defining memory access mode of the call.
+ * @param domain Value specifying the memory domain.
+ * @param region Structure defining the minimum area of the buffer to be
+ * prepared.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
+/**
+ * @brief Pins the buffer.
+ *
+ * Notice that the number of mem chunks a buffer consists of can change at any
+ * time if the buffer is not pinned. Because of this one can not assume that
+ * pin will succeed if <mem_chunks> has the length specified by a previous call
+ * to pin as the buffer layout may have changed between the calls. There are
+ * two ways of handling this situation, keep redoing the pin procedure till it
+ * succeeds or allocate enough mem chunks for the worst case ("buffer size" /
+ * "page size" mem chunks). Contiguous buffers always require only one mem
+ * chunk.
+ *
+ * @param alloc Buffer to be pinned.
+ * @param mem_chunks Pointer to array of mem chunks.
+ * @param mem_chunks_length Pointer to variable that contains the length of
+ * <mem_chunks> array. On success the number of written mem chunks will be
+ * stored in this variable. If the call fails with -ENOSPC the required length
+ * of <mem_chunks> will be stored in this variable.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ size_t *mem_chunks_length);
+
+/**
+ * @brief Unpins the buffer.
+ *
+ * @param alloc Buffer to be unpinned.
+ */
+void hwmem_unpin(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Map the buffer to user space.
+ *
+ * @param alloc Buffer to be mapped.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma);
+
+/**
+ * @brief Map the buffer for use in the kernel.
+ *
+ * This function implicitly pins the buffer.
+ *
+ * @param alloc Buffer to be mapped.
+ *
+ * @return Pointer to buffer, or a negative error code.
+ */
+void *hwmem_kmap(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Un-map a buffer previously mapped with hwmem_kmap.
+ *
+ * This function implicitly unpins the buffer.
+ *
+ * @param alloc Buffer to be un-mapped.
+ */
+void hwmem_kunmap(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Set access rights for buffer.
+ *
+ * @param alloc Buffer to set rights for.
+ * @param access Access value indicating what is allowed.
+ * @param pid Process ID to set rights for.
+ */
+int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access,
+ pid_t pid);
+
+/**
+ * @brief Get buffer information.
+ *
+ * @param alloc Buffer to get information about.
+ * @param size Pointer to size output variable. Can be NULL.
+ * @param mem_type Pointer to memory type output variable. Can be NULL.
+ * @param access Pointer to access rights output variable. Can be NULL.
+ */
+void hwmem_get_info(struct hwmem_alloc *alloc, size_t *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access);
+
+/**
+ * @brief Allocate a global buffer name.
+ * Generated buffer name is valid in all processes. Consecutive calls will get
+ * the same name for the same buffer.
+ *
+ * @param alloc Buffer to be made public.
+ *
+ * @return Positive global name on success, or a negative error code.
+ */
+s32 hwmem_get_name(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Import the global buffer name to allow local access to the buffer.
+ * This call will add a buffer reference. Resulting buffer should be
+ * released with a call to hwmem_release.
+ *
+ * @param name A valid global buffer name.
+ *
+ * @return Pointer to allocation, or a negative error code.
+ */
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name);
+
+/* Integration */
+
+struct hwmem_allocator_api {
+ void *(*alloc)(void *instance, size_t size);
+ void (*free)(void *instance, void *alloc);
+ phys_addr_t (*get_alloc_paddr)(void *alloc);
+ void *(*get_alloc_kaddr)(void *instance, void *alloc);
+ size_t (*get_alloc_size)(void *alloc);
+};
+
+struct hwmem_mem_type_struct {
+ enum hwmem_mem_type id;
+ struct hwmem_allocator_api allocator_api;
+ void *allocator_instance;
+};
+
+extern struct hwmem_mem_type_struct *hwmem_mem_types;
+extern unsigned int hwmem_num_mem_types;
+
+#endif
+
+#endif /* _HWMEM_H_ */
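A hypothetical user-space sketch (not part of the patch) of the fd-bound allocation path described above: allocate on the fd, then mmap with offset 0. The device node name is an assumption based on HWMEM_DEFAULT_DEVICE_NAME, and error handling is omitted.

#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/hwmem.h>

int main(void)
{
	int fd = open("/dev/hwmem", O_RDWR);	/* assumed node name */
	struct hwmem_alloc_request req = {
		.size		= 4096,
		.flags		= HWMEM_ALLOC_HINT_WRITE_COMBINE,
		.default_access	= HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
		.mem_type	= HWMEM_MEM_CONTIGUOUS_SYS,
	};
	void *buf;

	ioctl(fd, HWMEM_ALLOC_FD_IOC, &req);
	/* buffer is tied to the fd, so the mmap offset is 0 */
	buf = mmap(NULL, req.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return buf == MAP_FAILED;
}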
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 6b6ee702b00..8e891b5a777 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -15,11 +15,16 @@
#define _HWMON_H_
#include <linux/device.h>
+#include <linux/notifier.h>
struct device *hwmon_device_register(struct device *dev);
void hwmon_device_unregister(struct device *dev);
+int hwmon_notifier_register(struct notifier_block *nb);
+int hwmon_notifier_unregister(struct notifier_block *nb);
+void hwmon_notify(unsigned long val, void *v);
+
/* Scale user input to sensible values */
static inline int SENSORS_LIMIT(long value, long low, long high)
{
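A hedged sketch (not in the patch) of a client of the new hwmon notifier interface; what the producer passes in val is not specified here, so the handler body is illustrative.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/hwmon.h>

static int board_hwmon_event(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	/* react to the sensor event reported in 'val' */
	return NOTIFY_OK;
}

static struct notifier_block board_hwmon_nb = {
	.notifier_call = board_hwmon_event,
};

static int __init board_hwmon_client_init(void)
{
	return hwmon_notifier_register(&board_hwmon_nb);
}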
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
index 05e03284b92..143f433b9ee 100644
--- a/include/linux/input/bu21013.h
+++ b/include/linux/input/bu21013.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2009
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* License terms:GNU General Public License (GPL) version 2
*/
@@ -9,32 +9,36 @@
/**
* struct bu21013_platform_device - Handle the platform data
- * @cs_en: pointer to the cs enable function
- * @cs_dis: pointer to the cs disable function
- * @irq_read_val: pointer to read the pen irq value function
+ * @cs_en: pointer to the cs enable function
+ * @cs_dis: pointer to the cs disable function
+ * @irq_read_val: pointer to read the pen irq value function
+ * @x_max_res: xmax resolution
+ * @y_max_res: ymax resolution
* @touch_x_max: touch x max
* @touch_y_max: touch y max
* @cs_pin: chip select pin
* @irq: irq pin
- * @ext_clk: external clock flag
+ * @has_ext_clk: has external clock
+ * @enable_ext_clk: enable external clock
+ * @portrait: portrait mode flag
* @x_flip: x flip flag
* @y_flip: y flip flag
- * @wakeup: wakeup flag
- *
* This is used to handle the platform data
- */
+ */
struct bu21013_platform_device {
int (*cs_en)(int reset_pin);
int (*cs_dis)(int reset_pin);
int (*irq_read_val)(void);
+ int x_max_res;
+ int y_max_res;
int touch_x_max;
int touch_y_max;
unsigned int cs_pin;
unsigned int irq;
- bool ext_clk;
+ bool has_ext_clk;
+ bool enable_ext_clk;
+ bool portrait;
bool x_flip;
bool y_flip;
- bool wakeup;
};
-
#endif
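A hypothetical board sketch (not part of the patch) showing how the renamed and added bu21013 fields might be populated; the helper callbacks and all values are illustrative.

#include <linux/input/bu21013.h>

/* assumed board helpers, sketched as stubs */
static int board_tsc_cs_enable(int reset_pin)  { return 0; }
static int board_tsc_cs_disable(int reset_pin) { return 0; }
static int board_tsc_read_irq(void)            { return 0; }

static struct bu21013_platform_device tsc_plat_device = {
	.cs_en		= board_tsc_cs_enable,
	.cs_dis		= board_tsc_cs_disable,
	.irq_read_val	= board_tsc_read_irq,
	.x_max_res	= 480,
	.y_max_res	= 864,
	.touch_x_max	= 768,
	.touch_y_max	= 1024,
	.has_ext_clk	= true,
	.enable_ext_clk	= false,
	.portrait	= true,
	.x_flip		= false,
	.y_flip		= true,
};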
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2fa0901219d..f499cdea584 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -118,6 +118,7 @@ struct kimage {
/* kexec interface functions */
extern void machine_kexec(struct kimage *image);
+extern void machine_crash_swreset(void);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
extern asmlinkage long sys_kexec_load(unsigned long entry,
@@ -168,6 +169,7 @@ unsigned long paddr_vmcoreinfo_note(void);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
+extern struct atomic_notifier_head crash_percpu_notifier_list;
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
diff --git a/include/linux/leds-ab5500.h b/include/linux/leds-ab5500.h
new file mode 100644
index 00000000000..9ba9ac61d90
--- /dev/null
+++ b/include/linux/leds-ab5500.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Simple driver for HVLED in ST-Ericsson AB5500 Analog baseband Controller
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ */
+
+#define AB5500_HVLED0 0
+#define AB5500_HVLED1 1
+#define AB5500_HVLED2 2
+#define AB5500_HVLEDS_MAX 3
+
+enum ab5500_fade_delay {
+ AB5500_FADE_DELAY_BYPASS = 0,
+ AB5500_FADE_DELAY_HALFSEC,
+ AB5500_FADE_DELAY_ONESEC,
+ AB5500_FADE_DELAY_TWOSEC
+};
+
+struct ab5500_led_conf {
+ char *name;
+ u8 led_id;
+ u8 max_current;
+ u8 fade_hi;
+ u8 fade_lo;
+ bool led_on;
+};
+
+struct ab5500_hvleds_platform_data {
+ bool hw_fade;
+ struct ab5500_led_conf leds[AB5500_HVLEDS_MAX];
+};
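A hypothetical board sketch (not part of the patch) of the platform data above with a single HVLED configured; the name, current and fade values are illustrative.

#include <linux/types.h>
#include <linux/leds-ab5500.h>

static struct ab5500_hvleds_platform_data board_hvleds_data = {
	.hw_fade = true,
	.leds = {
		[AB5500_HVLED0] = {
			.name		= "button-backlight",
			.led_id		= AB5500_HVLED0,
			.max_current	= 10,	/* illustrative */
			.fade_hi	= AB5500_FADE_DELAY_HALFSEC,
			.fade_lo	= AB5500_FADE_DELAY_ONESEC,
			.led_on		= true,
		},
	},
};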
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
index 33a07116748..9c5eab6e086 100644
--- a/include/linux/leds_pwm.h
+++ b/include/linux/leds_pwm.h
@@ -11,6 +11,7 @@ struct led_pwm {
u8 active_low;
unsigned max_brightness;
unsigned pwm_period_ns;
+ unsigned int lth_brightness;
};
struct led_pwm_platform_data {
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index 838c6b487cc..424bdc81f68 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -157,7 +157,6 @@ struct ab8500 {
struct device *dev;
struct mutex lock;
struct mutex irq_lock;
-
int irq_base;
int irq;
u8 chip_id;
@@ -172,27 +171,44 @@ struct ab8500 {
u8 oldmask[AB8500_NUM_IRQ_REGS];
};
-struct regulator_reg_init;
-struct regulator_init_data;
+struct ab8500_regulator_platform_data;
+struct ab8500_accdet_platform_data;
+struct ab8500_denc_platform_data;
+struct ab8500_audio_platform_data;
struct ab8500_gpio_platform_data;
/**
* struct ab8500_platform_data - AB8500 platform data
+ * @pm_power_off: Should machine pm power off hook be registered or not
+ * @thermal_power_off_pending: Set if there was a thermal alarm
+ * @thermal_set_time_sec: Time of the thermal alarm
+ * @thermal_time_out: Time out before the thermal alarm should be ignored
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @num_regulator_reg_init: number of regulator init registers
- * @regulator_reg_init: regulator init registers
- * @num_regulator: number of regulators
* @regulator: machine-specific constraints for regulators
+ * @accdet: machine-specific Accessory detection data
+ * @battery: machine-specific battery management data
+ * @charger: machine-specific charger data
+ * @btemp: machine-specific battery temp data
*/
struct ab8500_platform_data {
int irq_base;
+ bool pm_power_off;
+ bool thermal_power_off_pending;
+ long thermal_set_time_sec;
+ long thermal_time_out;
void (*init) (struct ab8500 *);
- int num_regulator_reg_init;
- struct ab8500_regulator_reg_init *regulator_reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
+ struct ab8500_regulator_platform_data *regulator;
+ struct abx500_accdet_platform_data *accdet;
+ struct ab8500_bm_data *battery;
+ struct ab8500_denc_platform_data *denc;
+ struct ab8500_audio_platform_data *audio;
+ struct ab8500_charger_platform_data *charger;
+ struct ab8500_btemp_platform_data *btemp;
+ struct ab8500_fg_platform_data *fg;
+ struct ab8500_chargalg_platform_data *chargalg;
struct ab8500_gpio_platform_data *gpio;
+ struct abx500_usbgpio_platform_data *usb;
};
extern int __devinit ab8500_init(struct ab8500 *ab8500);
diff --git a/include/linux/mfd/ab8500/bm.h b/include/linux/mfd/ab8500/bm.h
new file mode 100644
index 00000000000..1fb67d25deb
--- /dev/null
+++ b/include/linux/mfd/ab8500/bm.h
@@ -0,0 +1,514 @@
+/*
+ * Copyright ST-Ericsson 2009.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB8500_BM_H
+#define _AB8500_BM_H
+
+#include <linux/kernel.h>
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB8500_MAIN_WDOG_CTRL_REG 0x01
+#define AB8500_LOW_BAT_REG 0x03
+
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB8500_USB_LINE_STAT_REG 0x80
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_STATUS1_REG 0x00
+#define AB8500_CH_STATUS2_REG 0x01
+#define AB8500_CH_USBCH_STAT1_REG 0x02
+#define AB8500_CH_USBCH_STAT2_REG 0x03
+#define AB8500_CH_FSM_STAT_REG 0x04
+#define AB8500_CH_STAT_REG 0x05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_VOLT_LVL_REG 0x40
+#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
+#define AB8500_CH_OPT_CRNTLVL_REG 0x42
+#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
+#define AB8500_CH_WD_TIMER_REG 0x50
+#define AB8500_CHARG_WD_CTRL 0x51
+#define AB8500_BTEMP_HIGH_TH 0x52
+#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
+#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
+#define AB8500_BATT_OVV 0x55
+#define AB8500_CHARGER_CTRL 0x56
+#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_MCH_CTRL1 0x80
+#define AB8500_MCH_CTRL2 0x81
+#define AB8500_MCH_IPT_CURLVL_REG 0x82
+#define AB8500_CH_WD_REG 0x83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_USBCH_CTRL1_REG 0xC0
+#define AB8500_USBCH_CTRL2_REG 0xC1
+#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
+
+/*
+ * Gas Gauge register offsets
+ * Bank : 0x0C
+ */
+#define AB8500_GASG_CC_CTRL_REG 0x00
+#define AB8500_GASG_CC_ACCU1_REG 0x01
+#define AB8500_GASG_CC_ACCU2_REG 0x02
+#define AB8500_GASG_CC_ACCU3_REG 0x03
+#define AB8500_GASG_CC_ACCU4_REG 0x04
+#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
+#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
+#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
+#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
+#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
+#define AB8500_GASG_CC_OFFSET_REG 0x0A
+#define AB8500_GASG_CC_NCOV_ACCU 0x10
+#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
+#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
+#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
+#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE21_REG 0x14
+
+/*
+ * RTC register offsets
+ * Bank: 0x0F
+ */
+#define AB8500_RTC_BACKUP_CHG_REG 0x0C
+#define AB8500_RTC_CC_CONF_REG 0x01
+#define AB8500_RTC_CTRL_REG 0x0B
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_OTP_CONF_15 0x0E
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION 1024
+#define ADC_CH_MAIN_MIN 0
+#define ADC_CH_MAIN_MAX 20030
+#define ADC_CH_VBUS_MIN 0
+#define ADC_CH_VBUS_MAX 20030
+#define ADC_CH_VBAT_MIN 2300
+#define ADC_CH_VBAT_MAX 4800
+#define ADC_CH_BKBAT_MIN 0
+#define ADC_CH_BKBAT_MAX 3200
+
+/* Main charge i/p current */
+#define MAIN_CH_IP_CUR_0P9A 0x80
+#define MAIN_CH_IP_CUR_1P0A 0x90
+#define MAIN_CH_IP_CUR_1P1A 0xA0
+#define MAIN_CH_IP_CUR_1P2A 0xB0
+#define MAIN_CH_IP_CUR_1P3A 0xC0
+#define MAIN_CH_IP_CUR_1P4A 0xD0
+#define MAIN_CH_IP_CUR_1P5A 0xE0
+
+/* ChVoltLevel */
+#define CH_VOL_LVL_3P5 0x00
+#define CH_VOL_LVL_4P0 0x14
+#define CH_VOL_LVL_4P05 0x16
+#define CH_VOL_LVL_4P1 0x1B
+#define CH_VOL_LVL_4P15 0x20
+#define CH_VOL_LVL_4P2 0x25
+#define CH_VOL_LVL_4P6 0x4D
+
+/* ChOutputCurrentLevel */
+#define CH_OP_CUR_LVL_0P1 0x00
+#define CH_OP_CUR_LVL_0P2 0x01
+#define CH_OP_CUR_LVL_0P3 0x02
+#define CH_OP_CUR_LVL_0P4 0x03
+#define CH_OP_CUR_LVL_0P5 0x04
+#define CH_OP_CUR_LVL_0P6 0x05
+#define CH_OP_CUR_LVL_0P7 0x06
+#define CH_OP_CUR_LVL_0P8 0x07
+#define CH_OP_CUR_LVL_0P9 0x08
+#define CH_OP_CUR_LVL_1P4 0x0D
+#define CH_OP_CUR_LVL_1P5 0x0E
+#define CH_OP_CUR_LVL_1P6 0x0F
+
+/* BTEMP High thermal limits */
+#define BTEMP_HIGH_TH_57_0 0x00
+#define BTEMP_HIGH_TH_52 0x01
+#define BTEMP_HIGH_TH_57_1 0x02
+#define BTEMP_HIGH_TH_62 0x03
+
+/* current is mA */
+#define USB_0P1A 100
+#define USB_0P2A 200
+#define USB_0P3A 300
+#define USB_0P4A 400
+#define USB_0P5A 500
+
+#define LOW_BAT_3P1V 0x20
+#define LOW_BAT_2P3V 0x00
+#define LOW_BAT_RESET 0x01
+#define LOW_BAT_ENABLE 0x01
+
+/* Backup battery constants */
+#define BUP_ICH_SEL_50UA 0x00
+#define BUP_ICH_SEL_150UA 0x04
+#define BUP_ICH_SEL_300UA 0x08
+#define BUP_ICH_SEL_700UA 0x0C
+
+#define BUP_VCH_SEL_2P5V 0x00
+#define BUP_VCH_SEL_2P6V 0x01
+#define BUP_VCH_SEL_2P8V 0x02
+#define BUP_VCH_SEL_3P1V 0x03
+
+/* Battery OVV constants */
+#define BATT_OVV_ENA 0x02
+#define BATT_OVV_TH_3P7 0x00
+#define BATT_OVV_TH_4P75 0x01
+
+/* VBUS OVV constants */
+#define VBUS_OVV_SELECT_MASK 0x78
+#define VBUS_OVV_SELECT_5P6V 0x00
+#define VBUS_OVV_SELECT_5P7V 0x08
+#define VBUS_OVV_SELECT_5P8V 0x10
+#define VBUS_OVV_SELECT_5P9V 0x18
+#define VBUS_OVV_SELECT_6P0V 0x20
+#define VBUS_OVV_SELECT_6P1V 0x28
+#define VBUS_OVV_SELECT_6P2V 0x30
+#define VBUS_OVV_SELECT_6P3V 0x38
+
+#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
+
+/* Fuel Gauge constants */
+#define RESET_ACCU 0x02
+#define READ_REQ 0x01
+#define CC_DEEP_SLEEP_ENA 0x02
+#define CC_PWR_UP_ENA 0x01
+#define CC_SAMPLES_40 0x28
+#define RD_NCONV_ACCU_REQ 0x01
+#define CC_CALIB 0x08
+#define CC_INTAVGOFFSET_ENA 0x10
+#define CC_MUXOFFSET 0x80
+#define CC_INT_CAL_N_AVG_MASK 0x60
+#define CC_INT_CAL_SAMPLES_16 0x40
+#define CC_INT_CAL_SAMPLES_8 0x20
+#define CC_INT_CAL_SAMPLES_4 0x00
+
+/* RTC constants */
+#define RTC_BUP_CH_ENA 0x10
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA 0x01
+#define BAT_CTRL_20U_ENA 0x02
+#define BAT_CTRL_CMP_ENA 0x04
+#define FORCE_BAT_CTRL_CMP_HIGH 0x08
+#define BAT_CTRL_PULL_UP_ENA 0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN 00
+
+/* Concurrent instant current i/f */
+#define INVALID_CURRENT INT_MAX
+
+/*
+ * ADC for the battery thermistor.
+ * When using the ADC_THERM_BATCTRL the battery ID resistor is combined with
+ * a NTC resistor to both identify the battery and to measure its temperature.
+ * Different phone manufacturers use different techniques to both identify the
+ * battery and to read its temperature.
+ */
+enum adc_therm {
+ ADC_THERM_BATCTRL,
+ ADC_THERM_BATTEMP,
+};
+
+/**
+ * struct res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combines the identification resistor with a
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct v_to_cap - Table for translating voltage to capacity
+ * @voltage: Voltage in mV
+ * @capacity: Capacity in percent
+ */
+struct v_to_cap {
+ int voltage;
+ int capacity;
+};
+
+/* Forward declaration */
+struct ab8500_fg;
+
+/**
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ */
+struct ab8500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+};
+
+/**
+ * struct ab8500_maxim_parameters - battery charging maximization parameters
+ * @ena_maxi: Enable maximization for this battery type
+ * @chg_curr: Maximum charger current allowed
+ * @wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step: delta between two charger current settings (mA)
+ */
+struct ab8500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct battery_type - different batteries supported
+ * @name: battery technology
+ * @resis_high: battery upper resistance limit
+ * @resis_low: battery lower resistance limit
+ * @charge_full_design: Maximum battery capacity in mAh
+ * @nominal_voltage: Nominal voltage of the battery in mV
+ * @termination_vol: max voltage up to which the battery can be charged
+ * @termination_curr: battery charging termination current in mA
+ * @recharge_vol: battery voltage limit that will trigger a new
+ * full charging cycle in the case where maintenance
+ * charging has been disabled
+ * @normal_cur_lvl: charger current in normal state in mA
+ * @normal_vol_lvl: charger voltage in normal state in mV
+ * @maint_a_cur_lvl: charger current in maintenance A state in mA
+ * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
+ * @maint_a_chg_timer_h: charge time in maintenance A state
+ * @maint_b_cur_lvl: charger current in maintenance B state in mA
+ * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
+ * @maint_b_chg_timer_h: charge time in maintenance B state
+ * @low_high_cur_lvl: charger current in temp low/high state in mA
+ * @low_high_vol_lvl: charger voltage in temp low/high state in mV
+ * @battery_resistance: battery inner resistance in mOhm.
+ * @n_temp_tbl_elements: number of elements in r_to_t_tbl
+ * @r_to_t_tbl: table containing resistance to temp points
+ * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
+ * @v_to_cap_tbl: Voltage to capacity (in %) table
+ */
+struct battery_type {
+ int name;
+ int resis_high;
+ int resis_low;
+ int charge_full_design;
+ int nominal_voltage;
+ int termination_vol;
+ int termination_curr;
+ int recharge_vol;
+ int normal_cur_lvl;
+ int normal_vol_lvl;
+ int maint_a_cur_lvl;
+ int maint_a_vol_lvl;
+ int maint_a_chg_timer_h;
+ int maint_b_cur_lvl;
+ int maint_b_vol_lvl;
+ int maint_b_chg_timer_h;
+ int low_high_cur_lvl;
+ int low_high_vol_lvl;
+ int battery_resistance;
+ int n_temp_tbl_elements;
+ struct res_to_temp *r_to_t_tbl;
+ int n_v_cap_tbl_elements;
+ struct v_to_cap *v_to_cap_tbl;
+};
+
+/**
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct ab8500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct ab8500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct ab8500_bm_data - ab8500 battery management data
+ * @temp_under: under this temp, charging is stopped
+ * @temp_low: between this temp and temp_under charging is reduced
+ * @temp_high: between this temp and temp_over charging is reduced
+ * @temp_over: over this temp, charging is stopped
+ * @temp_interval_chg: temperature measurement interval in s when charging
+ * @temp_interval_nochg: temperature measurement interval in s when not charging
+ * @main_safety_tmr_h: safety timer for main charger
+ * @usb_safety_tmr_h: safety timer for usb charger
+ * @bkup_bat_v: voltage which we charge the backup battery with
+ * @bkup_bat_i: current which we charge the backup battery with
+ * @no_maintenance: indicates that maintenance charging is disabled
+ * @adc_therm: placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat: flag to enable charging of unknown batteries
+ * @enable_overshoot: flag to enable VBAT overshoot control
+ * @fg_res: resistance of FG resistor in mOhm
+ * @n_btypes: number of elements in array bat_type
+ * @batt_id: index of the identified battery in array bat_type
+ * @interval_charging: charge alg cycle period time when charging (sec)
+ * @interval_not_charging: charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis: temperature hysteresis
+ * @gnd_lift_resistance: Battery ground to phone ground resistance (mOhm)
+ * @maxi: maximization parameters
+ * @cap_levels: capacity in percent for the different capacity levels
+ * @bat_type: table of supported battery types
+ * @chg_params: charger parameters
+ * @fg_params: fuel gauge parameters
+ */
+struct ab8500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_interval_chg;
+ int temp_interval_nochg;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool no_maintenance;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ enum adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ int gnd_lift_resistance;
+ const struct ab8500_maxim_parameters *maxi;
+ const struct ab8500_bm_capacity_levels *cap_levels;
+ const struct battery_type *bat_type;
+ const struct ab8500_bm_charger_parameters *chg_params;
+ const struct ab8500_fg_parameters *fg_params;
+};
+
+struct ab8500_charger_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_btemp_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_fg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_chargalg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+struct ab8500_btemp;
+struct ab8500_gpadc;
+struct ab8500_fg;
+#ifdef CONFIG_AB8500_BM
+void ab8500_fg_reinit(void);
+void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
+struct ab8500_btemp *ab8500_btemp_get(void);
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
+struct ab8500_fg *ab8500_fg_get(void);
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
+int ab8500_fg_inst_curr_nonblocking(struct ab8500_fg *dev, int *local_result);
+#else
+static inline void ab8500_fg_reinit(void)
+{
+}
+static inline void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
+{
+}
+static inline struct ab8500_btemp *ab8500_btemp_get(void)
+{
+	return NULL;
+}
+static inline int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+	return 0;
+}
+static inline struct ab8500_fg *ab8500_fg_get(void)
+{
+	return NULL;
+}
+static inline int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
+{
+	return -ENODEV;
+}
+static inline int ab8500_fg_inst_curr_nonblocking(
+ struct ab8500_fg *dev,
+ int *local_result)
+{
+ return -ENODEV;
+}
+#endif
+#endif /* _AB8500_BM_H */
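/*
 * Illustrative sketch, not part of the patch: reading the instantaneous
 * battery current through the fuel gauge interface declared above.  It
 * assumes ab8500_fg_inst_curr_blocking() returns the current in mA on
 * success; the INVALID_CURRENT check mirrors the "concurrent instant
 * current i/f" define earlier in this header.
 */
static int example_read_inst_current(int *curr_ma)
{
	struct ab8500_fg *fg = ab8500_fg_get();
	int curr;

	if (!fg)
		return -ENODEV;

	curr = ab8500_fg_inst_curr_blocking(fg);
	if (curr == INVALID_CURRENT)
		return -EAGAIN;

	*curr_ma = curr;
	return 0;
}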
diff --git a/include/linux/mfd/ab8500/denc-regs.h b/include/linux/mfd/ab8500/denc-regs.h
new file mode 100644
index 00000000000..a6683ca7470
--- /dev/null
+++ b/include/linux/mfd/ab8500/denc-regs.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson AB8500 DENC related registers
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __AB8500_DENC_H
+#define __AB8500_DENC_H
+
+#define AB8500_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define AB8500_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
+#define AB8500_CTRL3 0x00000200
+#define AB8500_CTRL3_TH_SD_ENA_SHIFT 3
+#define AB8500_CTRL3_TH_SD_ENA_MASK 0x00000008
+#define AB8500_CTRL3_TH_SD_ENA(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, TH_SD_ENA, __x)
+#define AB8500_CTRL3_RESET_DENC_N_SHIFT 2
+#define AB8500_CTRL3_RESET_DENC_N_MASK 0x00000004
+#define AB8500_CTRL3_RESET_DENC_N(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, RESET_DENC_N, __x)
+#define AB8500_CTRL3_RESET_AUD_N_SHIFT 1
+#define AB8500_CTRL3_RESET_AUD_N_MASK 0x00000002
+#define AB8500_CTRL3_RESET_AUD_N(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, RESET_AUD_N, __x)
+#define AB8500_CTRL3_CLK_32K_OUT2_IS_SHIFT 0
+#define AB8500_CTRL3_CLK_32K_OUT2_IS_MASK 0x00000001
+#define AB8500_CTRL3_CLK_32K_OUT2_IS(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, CLK_32K_OUT2_IS, __x)
+#define AB8500_SYS_ULP_CLK_CONF 0x0000020A
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_PD_ENA_SHIFT 7
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_PD_ENA_MASK 0x00000080
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_PD_ENA(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_PD_ENA, __x)
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_BUF_ENA_SHIFT 6
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_BUF_ENA_MASK 0x00000040
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_BUF_ENA(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_BUF_ENA, __x)
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_STRE_SHIFT 5
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_STRE_MASK 0x00000020
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_STRE(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, ULP_CLK_STRE, __x)
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_INV_SHIFT 4
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_INV_MASK 0x00000010
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_INV(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_INV, __x)
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_DE_IN_SHIFT 3
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_DE_IN_MASK 0x00000008
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_DE_IN(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_DE_IN, __x)
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_STRE_SHIFT 2
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_STRE_MASK 0x00000004
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_STRE(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_STRE, __x)
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_SHIFT 0
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_MASK 0x00000003
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_NO_FUNC 0
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_AS_OUTPUT 1
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_AS_INPUT 2
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, ULP_CLK_CONF, \
+ AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_##__x)
+#define AB8500_SYS_CLK_CTRL 0x0000020C
+#define AB8500_SYS_CLK_CTRL_USB_CLK_VALID_SHIFT 2
+#define AB8500_SYS_CLK_CTRL_USB_CLK_VALID_MASK 0x00000004
+#define AB8500_SYS_CLK_CTRL_USB_CLK_VALID(__x) \
+ AB8500_VAL2REG(AB8500_SYS_CLK_CTRL, USB_CLK_VALID, __x)
+#define AB8500_SYS_CLK_CTRL_TVOUT_CLK_VALID_SHIFT 1
+#define AB8500_SYS_CLK_CTRL_TVOUT_CLK_VALID_MASK 0x00000002
+#define AB8500_SYS_CLK_CTRL_TVOUT_CLK_VALID(__x) \
+ AB8500_VAL2REG(AB8500_SYS_CLK_CTRL, TVOUT_CLK_VALID, __x)
+#define AB8500_SYS_CLK_CTRL_TVOUT_PLL_ENA_SHIFT 0
+#define AB8500_SYS_CLK_CTRL_TVOUT_PLL_ENA_MASK 0x00000001
+#define AB8500_SYS_CLK_CTRL_TVOUT_PLL_ENA(__x) \
+ AB8500_VAL2REG(AB8500_SYS_CLK_CTRL, TVOUT_PLL_ENA, __x)
+#define AB8500_REGU_MISC1 0x00000380
+#define AB8500_REGU_MISC1_V_TVOUT_LP_SHIFT 7
+#define AB8500_REGU_MISC1_V_TVOUT_LP_MASK 0x00000080
+#define AB8500_REGU_MISC1_V_TVOUT_LP(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_TVOUT_LP, __x)
+#define AB8500_REGU_MISC1_V_INT_CORE_12_LP_SHIFT 6
+#define AB8500_REGU_MISC1_V_INT_CORE_12_LP_MASK 0x00000040
+#define AB8500_REGU_MISC1_V_INT_CORE_12_LP(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_INT_CORE_12_LP, __x)
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_SHIFT 3
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_MASK 0x00000038
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_2V 0
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_225V 1
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_25V 2
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_275V 3
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_3V 4
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_325V 5
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_35V 6
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_INT_CORE_12_SEL, \
+ AB8500_REGU_MISC1_V_INT_CORE_12_SEL_##__x)
+#define AB8500_REGU_MISC1_V_INT_CORE_12_ENA_SHIFT 2
+#define AB8500_REGU_MISC1_V_INT_CORE_12_ENA_MASK 0x00000004
+#define AB8500_REGU_MISC1_V_INT_CORE_12_ENA(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_INT_CORE_12_ENA, __x)
+#define AB8500_REGU_MISC1_V_TVOUT_ENA_SHIFT 1
+#define AB8500_REGU_MISC1_V_TVOUT_ENA_MASK 0x00000002
+#define AB8500_REGU_MISC1_V_TVOUT_ENA(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_TVOUT_ENA, __x)
+#define AB8500_VAUX12_REGU 0x00000409
+#define AB8500_VAUX12_REGU_VAUX_1_SHIFT 2
+#define AB8500_VAUX12_REGU_VAUX_1_MASK 0x0000000C
+#define AB8500_VAUX12_REGU_VAUX_1_DISABLE 0
+#define AB8500_VAUX12_REGU_VAUX_1_FORCE_HP 1
+#define AB8500_VAUX12_REGU_VAUX_1_BY_CTRL_REG 2
+#define AB8500_VAUX12_REGU_VAUX_1_FORCE_LP 3
+#define AB8500_VAUX12_REGU_VAUX_1(__x) \
+ AB8500_VAL2REG(AB8500_VAUX12_REGU, VAUX_1, \
+ AB8500_VAUX12_REGU_VAUX_1_##__x)
+#define AB8500_VAUX12_REGU_VAUX_2_SHIFT 0
+#define AB8500_VAUX12_REGU_VAUX_2_MASK 0x00000003
+#define AB8500_VAUX12_REGU_VAUX_2_DISABLE 0
+#define AB8500_VAUX12_REGU_VAUX_2_FORCE_HP 1
+#define AB8500_VAUX12_REGU_VAUX_2_BY_CTRL_REG 2
+#define AB8500_VAUX12_REGU_VAUX_2_FORCE_LP 3
+#define AB8500_VAUX12_REGU_VAUX_2(__x) \
+ AB8500_VAL2REG(AB8500_VAUX12_REGU, VAUX_2, \
+ AB8500_VAUX12_REGU_VAUX_2_##__x)
+#define AB8500_VAUX1_SEL 0x0000041F
+#define AB8500_VAUX1_SEL_VAL_SHIFT 0
+#define AB8500_VAUX1_SEL_VAL_MASK 0x0000000F
+#define AB8500_VAUX1_SEL_VAL_1_1V 0
+#define AB8500_VAUX1_SEL_VAL_1_2V 1
+#define AB8500_VAUX1_SEL_VAL_1_3V 2
+#define AB8500_VAUX1_SEL_VAL_1_4V 3
+#define AB8500_VAUX1_SEL_VAL_1_5V 4
+#define AB8500_VAUX1_SEL_VAL_1_8V 5
+#define AB8500_VAUX1_SEL_VAL_1_85V 6
+#define AB8500_VAUX1_SEL_VAL_1_9V 7
+#define AB8500_VAUX1_SEL_VAL_2_5V 8
+#define AB8500_VAUX1_SEL_VAL_2_65V 9
+#define AB8500_VAUX1_SEL_VAL_2_7V 10
+#define AB8500_VAUX1_SEL_VAL_2_75V 11
+#define AB8500_VAUX1_SEL_VAL_2_8V 12
+#define AB8500_VAUX1_SEL_VAL_2_9V 13
+#define AB8500_VAUX1_SEL_VAL_3_0V 14
+#define AB8500_VAUX1_SEL_VAL_3_3V 15
+#define AB8500_VAUX1_SEL_VAL(__x) \
+ AB8500_VAL2REG(AB8500_VAUX1_SEL, VAL, AB8500_VAUX1_SEL_VAL_##__x)
+#define AB8500_DENC_CONF0 0x00000600
+#define AB8500_DENC_CONF0_STD_SHIFT 6
+#define AB8500_DENC_CONF0_STD_MASK 0x000000C0
+#define AB8500_DENC_CONF0_STD_PAL_BDGHI 0
+#define AB8500_DENC_CONF0_STD_PAL_N 1
+#define AB8500_DENC_CONF0_STD_NTSC_M 2
+#define AB8500_DENC_CONF0_STD_PAL_M 3
+#define AB8500_DENC_CONF0_STD(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF0, STD, AB8500_DENC_CONF0_STD_##__x)
+#define AB8500_DENC_CONF0_SYNC_SHIFT 3
+#define AB8500_DENC_CONF0_SYNC_MASK 0x00000038
+#define AB8500_DENC_CONF0_SYNC_F_BASED_SLAVE 1
+#define AB8500_DENC_CONF0_SYNC_AUTO_TEST 7
+#define AB8500_DENC_CONF0_SYNC(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF0, SYNC, AB8500_DENC_CONF0_SYNC_##__x)
+#define AB8500_DENC_CONF1 0x00000601
+#define AB8500_DENC_CONF1_BLK_LI_SHIFT 7
+#define AB8500_DENC_CONF1_BLK_LI_MASK 0x00000080
+#define AB8500_DENC_CONF1_BLK_LI_PARTIAL 0
+#define AB8500_DENC_CONF1_BLK_LI_FULL 1
+#define AB8500_DENC_CONF1_BLK_LI(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, BLK_LI, \
+ AB8500_DENC_CONF1_BLK_LI_##__x)
+#define AB8500_DENC_CONF1_FLT_SHIFT 5
+#define AB8500_DENC_CONF1_FLT_MASK 0x00000060
+#define AB8500_DENC_CONF1_FLT_1_1MHZ 0
+#define AB8500_DENC_CONF1_FLT_1_3MHZ 1
+#define AB8500_DENC_CONF1_FLT_1_6MHZ 2
+#define AB8500_DENC_CONF1_FLT_1_9MHZ 3
+#define AB8500_DENC_CONF1_FLT(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, FLT, AB8500_DENC_CONF1_FLT_##__x)
+#define AB8500_DENC_CONF1_CO_KI_SHIFT 3
+#define AB8500_DENC_CONF1_CO_KI_MASK 0x00000008
+#define AB8500_DENC_CONF1_CO_KI(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CO_KI, __x)
+#define AB8500_DENC_CONF1_SETUP_MAIN_SHIFT 2
+#define AB8500_DENC_CONF1_SETUP_MAIN_MASK 0x00000004
+#define AB8500_DENC_CONF1_SETUP_MAIN_BLACK_EQ_BLANK 0
+#define AB8500_DENC_CONF1_SETUP_MAIN_BLACK_GT_BLANK 1
+#define AB8500_DENC_CONF1_SETUP_MAIN(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, SETUP_MAIN, \
+ AB8500_DENC_CONF1_SETUP_MAIN_##__x)
+#define AB8500_DENC_CONF1_CC_SHIFT 0
+#define AB8500_DENC_CONF1_CC_MASK 0x00000003
+#define AB8500_DENC_CONF1_CC_NONE 0
+#define AB8500_DENC_CONF1_CC_FIELD_1 1
+#define AB8500_DENC_CONF1_CC_FIELD_2 2
+#define AB8500_DENC_CONF1_CC_ALL 3
+#define AB8500_DENC_CONF1_CC(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CC, AB8500_DENC_CONF1_CC_##__x)
+#define AB8500_DENC_CONF2 0x00000602
+#define AB8500_DENC_CONF2_N_INTRL_SHIFT 7
+#define AB8500_DENC_CONF2_N_INTRL_MASK 0x00000080
+#define AB8500_DENC_CONF2_N_INTRL(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, N_INTRL, __x)
+#define AB8500_DENC_CONF2_EN_RST_SHIFT 6
+#define AB8500_DENC_CONF2_EN_RST_MASK 0x00000040
+#define AB8500_DENC_CONF2_EN_RST(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, EN_RST, __x)
+#define AB8500_DENC_CONF2_BURST_EN_SHIFT 5
+#define AB8500_DENC_CONF2_BURST_EN_MASK 0x00000020
+#define AB8500_DENC_CONF2_BURST_EN(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, BURST_EN, __x)
+#define AB8500_DENC_CONF2_SEL_RST_SHIFT 4
+#define AB8500_DENC_CONF2_SEL_RST_MASK 0x00000010
+#define AB8500_DENC_CONF2_SEL_RST_USE_HW_VAL 0
+#define AB8500_DENC_CONF2_SEL_RST_USE_PROG_VAL 1
+#define AB8500_DENC_CONF2_SEL_RST(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, SEL_RST, \
+ AB8500_DENC_CONF2_SEL_RST_##__x)
+#define AB8500_DENC_CONF2_RST_OSC_BUF_SHIFT 2
+#define AB8500_DENC_CONF2_RST_OSC_BUF_MASK 0x00000004
+#define AB8500_DENC_CONF2_RST_OSC_BUF(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, RST_OSC_BUF, __x)
+#define AB8500_DENC_CONF2_VAL_RST_SHIFT 0
+#define AB8500_DENC_CONF2_VAL_RST_MASK 0x00000003
+#define AB8500_DENC_CONF2_VAL_RST_ALL_LINES 0
+#define AB8500_DENC_CONF2_VAL_RST_EVERY_2ND_FIELD 1
+#define AB8500_DENC_CONF2_VAL_RST_EVERY_4TH_FIELD 2
+#define AB8500_DENC_CONF2_VAL_RST_EVERY_8TH_FIELD 3
+#define AB8500_DENC_CONF2_VAL_RST(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, VAL_RST, \
+ AB8500_DENC_CONF2_VAL_RST_##__x)
+#define AB8500_DENC_CONF6 0x00000606
+#define AB8500_DENC_CONF6_SOFT_RESET_SHIFT 7
+#define AB8500_DENC_CONF6_SOFT_RESET_MASK 0x00000080
+#define AB8500_DENC_CONF6_SOFT_RESET(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, SOFT_RESET, __x)
+#define AB8500_DENC_CONF6_JUMP_SHIFT 6
+#define AB8500_DENC_CONF6_JUMP_MASK 0x00000040
+#define AB8500_DENC_CONF6_JUMP(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, JUMP, __x)
+#define AB8500_DENC_CONF6_DEC_NINC_SHIFT 5
+#define AB8500_DENC_CONF6_DEC_NINC_MASK 0x00000020
+#define AB8500_DENC_CONF6_DEC_NINC(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, DEC_NINC, __x)
+#define AB8500_DENC_CONF6_FREE_JUMP_SHIFT 4
+#define AB8500_DENC_CONF6_FREE_JUMP_MASK 0x00000010
+#define AB8500_DENC_CONF6_FREE_JUMP(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, FREE_JUMP, __x)
+#define AB8500_DENC_CONF6_MAX_DYN_SHIFT 0
+#define AB8500_DENC_CONF6_MAX_DYN_MASK 0x00000001
+#define AB8500_DENC_CONF6_MAX_DYN(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, MAX_DYN, __x)
+#define AB8500_DENC_CONF8 0x00000608
+#define AB8500_DENC_CONF8_PH_RST_MODE_SHIFT 6
+#define AB8500_DENC_CONF8_PH_RST_MODE_MASK 0x000000C0
+#define AB8500_DENC_CONF8_PH_RST_MODE_DISABLED 0
+#define AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_PHASE_BUF 1
+#define AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_INC_DFS 2
+#define AB8500_DENC_CONF8_PH_RST_MODE_RESET 3
+#define AB8500_DENC_CONF8_PH_RST_MODE(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF8, PH_RST_MODE, \
+ AB8500_DENC_CONF8_PH_RST_MODE_##__x)
+#define AB8500_DENC_CONF8_VAL_422_MUX_SHIFT 4
+#define AB8500_DENC_CONF8_VAL_422_MUX_MASK 0x00000010
+#define AB8500_DENC_CONF8_VAL_422_MUX_TEST 0
+#define AB8500_DENC_CONF8_VAL_422_MUX_ACTIVE 1
+#define AB8500_DENC_CONF8_VAL_422_MUX(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF8, VAL_422_MUX, \
+ AB8500_DENC_CONF8_VAL_422_MUX_##__x)
+#define AB8500_DENC_CONF8_BLK_ALL_SHIFT 3
+#define AB8500_DENC_CONF8_BLK_ALL_MASK 0x00000008
+#define AB8500_DENC_CONF8_BLK_ALL(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF8, BLK_ALL, __x)
+#define AB8500_TVOUT_CTRL 0x00000680
+#define AB8500_TVOUT_CTRL_TV_LOAD_RC_SHIFT 6
+#define AB8500_TVOUT_CTRL_TV_LOAD_RC_MASK 0x00000040
+#define AB8500_TVOUT_CTRL_TV_LOAD_RC(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, TV_LOAD_RC, __x)
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_SHIFT 3
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_MASK 0x00000038
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_0_5S 0
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_1S 1
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_1_5S 2
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_2S 3
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_2_5S 4
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_3S 5
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, PLUG_TV_TIME, \
+ AB8500_TVOUT_CTRL_PLUG_TV_TIME_##__x)
+#define AB8500_TVOUT_CTRL_TV_PLUG_ON_SHIFT 2
+#define AB8500_TVOUT_CTRL_TV_PLUG_ON_MASK 0x00000004
+#define AB8500_TVOUT_CTRL_TV_PLUG_ON(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, TV_PLUG_ON, __x)
+#define AB8500_TVOUT_CTRL_DAC_CTRL0_SHIFT 1
+#define AB8500_TVOUT_CTRL_DAC_CTRL0_MASK 0x00000002
+#define AB8500_TVOUT_CTRL_DAC_CTRL0(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, DAC_CTRL0, __x)
+#define AB8500_TVOUT_CTRL_DAC_CTRL1_SHIFT 0
+#define AB8500_TVOUT_CTRL_DAC_CTRL1_MASK 0x00000001
+#define AB8500_TVOUT_CTRL_DAC_CTRL1(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, DAC_CTRL1, __x)
+#define AB8500_TVOUT_CTRL2 0x00000681
+#define AB8500_TVOUT_CTRL2_SWAP_DDR_DATA_IN_SHIFT 1
+#define AB8500_TVOUT_CTRL2_SWAP_DDR_DATA_IN_MASK 0x00000002
+#define AB8500_TVOUT_CTRL2_SWAP_DDR_DATA_IN(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, SWAP_DDR_DATA_IN, __x)
+#define AB8500_TVOUT_CTRL2_DENC_DDR_SHIFT 0
+#define AB8500_TVOUT_CTRL2_DENC_DDR_MASK 0x00000001
+#define AB8500_TVOUT_CTRL2_DENC_DDR(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, DENC_DDR, __x)
+#define AB8500_IT_MASK1 0x00000E40
+#define AB8500_IT_MASK1_PON_KEY1_DBR_SHIFT 7
+#define AB8500_IT_MASK1_PON_KEY1_DBR_MASK 0x00000080
+#define AB8500_IT_MASK1_PON_KEY1_DBR(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY1_DBR, __x)
+#define AB8500_IT_MASK1_PON_KEY1_DBF_SHIFT 6
+#define AB8500_IT_MASK1_PON_KEY1_DBF_MASK 0x00000040
+#define AB8500_IT_MASK1_PON_KEY1_DBF(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY1_DBF, __x)
+#define AB8500_IT_MASK1_PON_KEY2_DBR_SHIFT 5
+#define AB8500_IT_MASK1_PON_KEY2_DBR_MASK 0x00000020
+#define AB8500_IT_MASK1_PON_KEY2_DBR(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY2_DBR, __x)
+#define AB8500_IT_MASK1_PON_KEY2_DBF_SHIFT 4
+#define AB8500_IT_MASK1_PON_KEY2_DBF_MASK 0x00000010
+#define AB8500_IT_MASK1_PON_KEY2_DBF(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY2_DBF, __x)
+#define AB8500_IT_MASK1_TEMP_WARN_SHIFT 3
+#define AB8500_IT_MASK1_TEMP_WARN_MASK 0x00000008
+#define AB8500_IT_MASK1_TEMP_WARN(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, TEMP_WARN, __x)
+#define AB8500_IT_MASK1_PLUG_TV_DET_SHIFT 2
+#define AB8500_IT_MASK1_PLUG_TV_DET_MASK 0x00000004
+#define AB8500_IT_MASK1_PLUG_TV_DET(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PLUG_TV_DET, __x)
+#define AB8500_IT_MASK1_UNPLUG_TV_DET_SHIFT 1
+#define AB8500_IT_MASK1_UNPLUG_TV_DET_MASK 0x00000002
+#define AB8500_IT_MASK1_UNPLUG_TV_DET(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, UNPLUG_TV_DET, __x)
+#define AB8500_IT_MASK1_MAIN_EXT_CH_NOK_SHIFT 0
+#define AB8500_IT_MASK1_MAIN_EXT_CH_NOK_MASK 0x00000001
+#define AB8500_IT_MASK1_MAIN_EXT_CH_NOK(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, MAIN_EXT_CH_NOK, __x)
+#define AB8500_REV 0x00001080
+#define AB8500_REV_FULL_MASK_SHIFT 4
+#define AB8500_REV_FULL_MASK_MASK 0x000000F0
+#define AB8500_REV_FULL_MASK(__x) \
+ AB8500_VAL2REG(AB8500_REV, FULL_MASK, __x)
+#define AB8500_REV_METAL_FIX_SHIFT 0
+#define AB8500_REV_METAL_FIX_MASK 0x0000000F
+#define AB8500_REV_METAL_FIX(__x) \
+ AB8500_VAL2REG(AB8500_REV, METAL_FIX, __x)
+
+#endif /* __AB8500_DENC_H */
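/*
 * Illustrative sketch, not part of the patch: the field macros above are
 * built on AB8500_VAL2REG(), so individual field encodings can be OR-ed
 * together into a register value.  The particular PAL/sync choice is only
 * an example.
 */
static inline u8 example_denc_conf0_pal(void)
{
	return AB8500_DENC_CONF0_STD(PAL_BDGHI) |
	       AB8500_DENC_CONF0_SYNC(F_BASED_SLAVE);
}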
diff --git a/include/linux/mfd/ab8500/denc.h b/include/linux/mfd/ab8500/denc.h
new file mode 100644
index 00000000000..25a09a2c2bd
--- /dev/null
+++ b/include/linux/mfd/ab8500/denc.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 tvout driver interface
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __AB8500_DENC__H__
+#define __AB8500_DENC__H__
+
+#include <linux/platform_device.h>
+
+struct ab8500_denc_platform_data {
+ /* Platform info */
+ bool ddr_enable;
+ bool ddr_little_endian;
+};
+
+enum ab8500_denc_TV_std {
+ TV_STD_PAL_BDGHI,
+ TV_STD_PAL_N,
+ TV_STD_PAL_M,
+ TV_STD_NTSC_M,
+};
+
+enum ab8500_denc_cr_filter_bandwidth {
+ TV_CR_NTSC_LOW_DEF_FILTER,
+ TV_CR_PAL_LOW_DEF_FILTER,
+ TV_CR_NTSC_HIGH_DEF_FILTER,
+ TV_CR_PAL_HIGH_DEF_FILTER,
+};
+
+enum ab8500_denc_phase_reset_mode {
+ TV_PHASE_RST_MOD_DISABLE,
+ TV_PHASE_RST_MOD_FROM_PHASE_BUF,
+ TV_PHASE_RST_MOD_FROM_INC_DFS,
+ TV_PHASE_RST_MOD_RST,
+};
+
+enum ab8500_denc_plug_time {
+ TV_PLUG_TIME_0_5S,
+ TV_PLUG_TIME_1S,
+ TV_PLUG_TIME_1_5S,
+ TV_PLUG_TIME_2S,
+ TV_PLUG_TIME_2_5S,
+ TV_PLUG_TIME_3S,
+};
+
+struct ab8500_denc_conf {
+ /* register settings for DENC_configuration */
+ bool act_output;
+ enum ab8500_denc_TV_std TV_std;
+ bool progressive;
+ bool test_pattern;
+ bool partial_blanking;
+ bool blank_all;
+ bool black_level_setup;
+ enum ab8500_denc_cr_filter_bandwidth cr_filter;
+ bool suppress_col;
+ enum ab8500_denc_phase_reset_mode phase_reset_mode;
+ bool dac_enable;
+ bool act_dc_output;
+};
+
+struct platform_device *ab8500_denc_get_device(void);
+void ab8500_denc_put_device(struct platform_device *pdev);
+
+void ab8500_denc_reset(struct platform_device *pdev, bool hard);
+void ab8500_denc_power_up(struct platform_device *pdev);
+void ab8500_denc_power_down(struct platform_device *pdev);
+
+void ab8500_denc_conf(struct platform_device *pdev,
+ struct ab8500_denc_conf *conf);
+void ab8500_denc_conf_plug_detect(struct platform_device *pdev,
+ bool enable, bool load_RC,
+ enum ab8500_denc_plug_time time);
+void ab8500_denc_mask_int_plug_det(struct platform_device *pdev, bool plug,
+ bool unplug);
+#endif /* __AB8500_DENC__H__ */
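/*
 * Illustrative sketch, not part of the patch: a rough call sequence a
 * display driver could use with the interface above.  The configuration
 * values are examples only, and it is assumed the device would later be
 * released with ab8500_denc_put_device() when no longer in use.
 */
static int example_denc_enable_pal(void)
{
	struct platform_device *pdev = ab8500_denc_get_device();
	struct ab8500_denc_conf conf = {
		.act_output	  = true,
		.TV_std		  = TV_STD_PAL_BDGHI,
		.cr_filter	  = TV_CR_PAL_HIGH_DEF_FILTER,
		.phase_reset_mode = TV_PHASE_RST_MOD_DISABLE,
		.dac_enable	  = true,
	};

	if (!pdev)
		return -ENODEV;

	ab8500_denc_power_up(pdev);
	ab8500_denc_conf(pdev, &conf);
	ab8500_denc_conf_plug_detect(pdev, true, false, TV_PLUG_TIME_1S);

	return 0;
}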
diff --git a/include/linux/mfd/ab8500/gpadc.h b/include/linux/mfd/ab8500/gpadc.h
index 252966769d9..fa706c5a04a 100644
--- a/include/linux/mfd/ab8500/gpadc.h
+++ b/include/linux/mfd/ab8500/gpadc.h
@@ -26,7 +26,7 @@
struct ab8500_gpadc;
-struct ab8500_gpadc *ab8500_gpadc_get(char *name);
+struct ab8500_gpadc *ab8500_gpadc_get(void);
int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel);
int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel);
int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
diff --git a/include/linux/mfd/ab8500/gpio.h b/include/linux/mfd/ab8500/gpio.h
index 488a8c920a2..d88e3025317 100644
--- a/include/linux/mfd/ab8500/gpio.h
+++ b/include/linux/mfd/ab8500/gpio.h
@@ -8,6 +8,8 @@
#ifndef _AB8500_GPIO_H
#define _AB8500_GPIO_H
+#include <mach/gpio.h>
+
/*
* Platform data to register a block: only the initial gpio/irq number.
*/
@@ -16,6 +18,62 @@ struct ab8500_gpio_platform_data {
int gpio_base;
u32 irq_base;
u8 config_reg[7];
+ u8 config_direction[6];
+ u8 config_pullups[6];
+};
+
+enum ab8500_pin {
+ AB8500_PIN_GPIO1 = AB8500_GPIO_BASE,
+ AB8500_PIN_GPIO2,
+ AB8500_PIN_GPIO3,
+ AB8500_PIN_GPIO4,
+ AB8500_PIN_GPIO5,
+ AB8500_PIN_GPIO6,
+ AB8500_PIN_GPIO7,
+ AB8500_PIN_GPIO8,
+ AB8500_PIN_GPIO9,
+ AB8500_PIN_GPIO10,
+ AB8500_PIN_GPIO11,
+ AB8500_PIN_GPIO12,
+ AB8500_PIN_GPIO13,
+ AB8500_PIN_GPIO14,
+ AB8500_PIN_GPIO15,
+ AB8500_PIN_GPIO16,
+ AB8500_PIN_GPIO17,
+ AB8500_PIN_GPIO18,
+ AB8500_PIN_GPIO19,
+ AB8500_PIN_GPIO20,
+ AB8500_PIN_GPIO21,
+ AB8500_PIN_GPIO22,
+ AB8500_PIN_GPIO23,
+ AB8500_PIN_GPIO24,
+ AB8500_PIN_GPIO25,
+ AB8500_PIN_GPIO26,
+ AB8500_PIN_GPIO27,
+ AB8500_PIN_GPIO28,
+ AB8500_PIN_GPIO29,
+ AB8500_PIN_GPIO30,
+ AB8500_PIN_GPIO31,
+ AB8500_PIN_GPIO32,
+ AB8500_PIN_GPIO33,
+ AB8500_PIN_GPIO34,
+ AB8500_PIN_GPIO35,
+ AB8500_PIN_GPIO36,
+ AB8500_PIN_GPIO37,
+ AB8500_PIN_GPIO38,
+ AB8500_PIN_GPIO39,
+ AB8500_PIN_GPIO40,
+ AB8500_PIN_GPIO41,
+ AB8500_PIN_GPIO42,
};
+int ab8500_config_pulldown(struct device *dev,
+ enum ab8500_pin gpio, bool enable);
+
+int ab8500_gpio_config_select(struct device *dev,
+ enum ab8500_pin gpio, bool gpio_select);
+
+int ab8500_gpio_config_get_select(struct device *dev,
+ enum ab8500_pin gpio, bool *gpio_select);
+
#endif /* _AB8500_GPIO_H */
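/*
 * Illustrative sketch, not part of the patch: how a board or driver might
 * use the AB8500 GPIO helpers declared above.  The pin choice and the
 * interpretation of the bool arguments (select GPIO function, disable the
 * pulldown) are assumptions of this example.
 */
static int example_setup_ab8500_pin(struct device *ab8500_dev)
{
	int ret;

	/* route the pad to its GPIO function rather than the alternate one */
	ret = ab8500_gpio_config_select(ab8500_dev, AB8500_PIN_GPIO1, true);
	if (ret)
		return ret;

	/* and turn off the internal pulldown on that pin */
	return ab8500_config_pulldown(ab8500_dev, AB8500_PIN_GPIO1, false);
}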
diff --git a/include/linux/mfd/ab8500/ux500_chargalg.h b/include/linux/mfd/ab8500/ux500_chargalg.h
new file mode 100644
index 00000000000..f04e47ff56a
--- /dev/null
+++ b/include/linux/mfd/ab8500/ux500_chargalg.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+#define psy_to_ux500_charger(x) container_of((x), \
+ struct ux500_charger, psy)
+
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy: power supply base class
+ * @ops: ux500 charger operations
+ * @max_out_volt: maximum output charger voltage in mV
+ * @max_out_curr: maximum output charger current in mA
+ */
+struct ux500_charger {
+ struct power_supply psy;
+ struct ux500_charger_ops ops;
+ int max_out_volt;
+ int max_out_curr;
+};
+
+#endif
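/*
 * Illustrative sketch, not part of the patch: a charger driver would embed
 * struct ux500_charger and fill in the ops; psy_to_ux500_charger() recovers
 * the wrapper from the embedded power_supply inside power-supply callbacks.
 * The "example_charger" type, the empty enable callback and the meaning
 * given to its three int arguments are assumptions of this sketch.
 */
struct example_charger {
	struct ux500_charger ac_chg;
	/* driver-private state would follow */
};

static int example_enable(struct ux500_charger *charger,
			  int enable, int vset, int iset)
{
	struct example_charger *di =
		container_of(charger, struct example_charger, ac_chg);

	/* program the hardware behind di with vset (mV) and iset (mA) */
	(void)di;
	(void)enable;
	return 0;
}

static void example_init_ops(struct example_charger *di)
{
	di->ac_chg.ops.enable	= example_enable;
	di->ac_chg.max_out_volt	= 4500;	/* mV, example value */
	di->ac_chg.max_out_curr	= 900;	/* mA, example value */
}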
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 9970337ff04..66a82b2b148 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -178,6 +178,12 @@ int abx500_get_chip_id(struct device *dev);
int abx500_event_registers_startup_state_get(struct device *dev, u8 *event);
int abx500_startup_irq_enabled(struct device *dev, unsigned int irq);
+#define abx500_get abx500_get_register_interruptible
+#define abx500_set abx500_set_register_interruptible
+#define abx500_get_page abx500_get_register_page_interruptible
+#define abx500_set_page abx500_set_register_page_interruptible
+#define abx500_mask_and_set abx500_mask_and_set_register_interruptible
+
struct abx500_ops {
int (*get_chip_id) (struct device *);
int (*get_register) (struct device *, u8, u8, u8 *);
@@ -189,6 +195,6 @@ struct abx500_ops {
int (*startup_irq_enabled) (struct device *, unsigned int);
};
-int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
+int abx500_register_ops(struct device *dev, struct abx500_ops *ops);
void abx500_remove_ops(struct device *dev);
#endif
diff --git a/include/linux/mfd/abx500/ab5500-bm.h b/include/linux/mfd/abx500/ab5500-bm.h
new file mode 100644
index 00000000000..cfad11c530c
--- /dev/null
+++ b/include/linux/mfd/abx500/ab5500-bm.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright ST-Ericsson 2011.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB5500_BM_H
+#define _AB5500_BM_H
+
+#define AB5500_MCB 0x2F
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB5500_USB_LINE_STATUS 0x80
+#define AB5500_USB_PHY_STATUS 0x89
+#define AB5500_CHGFSM_CHARGER_DETECT 0xBF
+#define AB5500_CHGFSM_USB_BTEMP_CURR_LIM 0xAD
+#define AB5500_USB_LINE_CTRL2 0x82
+#define AB5500_USB_OTG_CTRL 0x87
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB5500_CVBUSM 0x11
+#define AB5500_LEDT 0x12
+#define AB5500_VSRC 0x13
+#define AB5500_ICSR 0x14
+#define AB5500_OCSRV 0x15
+#define AB5500_CVREC 0x16
+#define AB5500_CREVS 0x17
+#define AB5500_CCTRL 0x18
+#define AB5500_TBDATA 0x19
+#define AB5500_CPWM 0x1A
+#define AB5500_DCIOCURRENT 0x1B
+#define AB5500_USB_HS_CURR_LIM 0x1C
+#define AB5500_WALL_HS_CURR_LIM 0x1D
+
+/*
+ * FG, Battcom and ACC register offsets
+ * Bank : 0x0C
+ */
+#define AB5500_FG_CH0 0x00
+#define AB5500_FG_CH1 0x01
+#define AB5500_FG_CH2 0x02
+#define AB5500_FG_DIS_CH0 0x03
+#define AB5500_FG_DIS_CH1 0x04
+#define AB5500_FG_DIS_CH2 0x05
+#define AB5500_FGDIS_COUNT0 0x06
+#define AB5500_FGDIS_COUNT1 0x07
+#define AB5500_FG_VAL_COUNT0 0x08
+#define AB5500_FG_VAL_COUNT1 0x09
+#define AB5500_FGDIR_READ0 0x0A
+#define AB5500_FGDIR_READ1 0x0B
+#define AB5500_FG_CONTROL_A 0x0C
+#define AB5500_FG_CONTROL_B 0x0F
+#define AB5500_FG_CONTROL_C 0x10
+#define AB5500_FG_DIS 0x0D
+#define AB5500_FG_EOC 0x0E
+#define AB5500_FG_CB 0x0F
+#define AB5500_FG_CC 0x10
+#define AB5500_UIOR 0x1A
+#define AB5500_UART 0x1B
+#define AB5500_URI 0x1C
+#define AB5500_UART_RQ 0x1D
+#define AB5500_ACC_DETECT1 0x20
+#define AB5500_ACC_DETECT2 0x21
+#define AB5500_ACC_DETECTCTRL 0x23
+#define AB5500_ACC_AVCTRL 0x24
+#define AB5500_ACC_DETECT3_DEG_LITCH_TIME 0x30
+#define AB5500_ACC_DETECT3_KEY_PRESS_TIME 0x31
+#define AB5500_ACC_DETECT3_LONG_KEY_TIME 0x32
+#define AB5500_ACC_DETECT3_TIME_READ_MS 0x33
+#define AB5500_ACC_DETECT3_TIME_READ_LS 0x34
+#define AB5500_ACC_DETECT3_CONTROL 0x35
+#define AB5500_ACC_DETECT3_LEVEL 0x36
+#define AB5500_ACC_DETECT3_TIMER_READ_CTL 0x37
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB5500_IT_SOURCE8 0x28
+#define AB5500_IT_SOURCE9 0x29
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA (0x01 << 0)
+#define BAT_CTRL_15U_ENA (0x01 << 1)
+#define BAT_CTRL_30U_ENA (0x01 << 2)
+#define BAT_CTRL_60U_ENA (0x01 << 3)
+#define BAT_CTRL_120U_ENA (0x01 << 4)
+#define BAT_CTRL_CMP_ENA 0x04
+#define FORCE_BAT_CTRL_CMP_HIGH 0x08
+#define BAT_CTRL_PULL_UP_ENA 0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN 0
+
+#ifdef CONFIG_AB5500_BM
+struct ab5500_btemp *ab5500_btemp_get(void);
+int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *btemp);
+#else
+static inline struct ab5500_btemp *ab5500_btemp_get(void)
+{
+	return NULL;
+}
+static inline int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *btemp)
+{
+ return 0;
+}
+#endif
+#endif /* _AB5500_BM_H */
diff --git a/include/linux/mfd/abx500/ab5500-gpadc.h b/include/linux/mfd/abx500/ab5500-gpadc.h
new file mode 100644
index 00000000000..3282b44114e
--- /dev/null
+++ b/include/linux/mfd/abx500/ab5500-gpadc.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson SA
+ * Licensed under GPLv2.
+ *
+ * Author: Vijaya Kumar K <vijay.kilari@stericsson.com>
+ */
+
+#ifndef _AB5500_GPADC_H
+#define _AB5500_GPADC_H
+
+/*
+ * GPADC source:
+ * The BTEMP_BALL and PCB_TEMP sources are the same. They differ if the
+ * battery supports an internal NTC resistor connected to the BDATA
+ * line. In this case, BTEMP_BALL corresponds to BDATA
+ * of GPADC as per AB5500 product spec.
+ */
+
+#define BTEMP_BALL 0
+#define ACC_DETECT2 1
+#define ACC_DETECT3 2
+#define MAIN_BAT_V 3
+#define MAIN_BAT_V_TXON 4
+#define VBUS_V 5
+#define USB_CHARGER_C 6
+#define BK_BAT_V 7
+#define DIE_TEMP 8
+#define PCB_TEMP 9
+#define XTAL_TEMP 10
+#define USB_ID 11
+#define BAT_CTRL 12
+/* VBAT with TXON only min trigger */
+#define MAIN_BAT_V_TXON_TRIG_MIN 13
+/* VBAT with TX off only min trigger */
+#define MAIN_BAT_V_TRIG_MIN 14
+
+/*
+ * Frequency of auto adc conversion
+ */
+#define MS1000 0x0
+#define MS500 0x1
+#define MS200 0x2
+#define MS100 0x3
+#define MS10 0x4
+
+struct ab5500_gpadc;
+
+/**
+ * struct adc_auto_input - AB5500 GPADC auto trigger
+ * @mux: Mux input
+ * @freq: frequency of conversion
+ * @min: min value for trigger
+ * @max: max value for trigger
+ * @auto_adc_callback: notification callback
+ */
+struct adc_auto_input {
+ u8 mux;
+ u8 freq;
+ int min;
+ int max;
+ int (*auto_adc_callback)(int mux);
+};
+
+struct ab5500_gpadc *ab5500_gpadc_get(const char *name);
+int ab5500_gpadc_convert(struct ab5500_gpadc *gpadc, u8 input);
+int ab5500_gpadc_convert_auto(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *auto_input);
+
+#endif /* _AB5500_GPADC_H */
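/*
 * Illustrative sketch, not part of the patch: registering an automatic
 * conversion with the AB5500 GPADC using the interface above.  The window
 * values are arbitrary, and the idea that the callback fires when the
 * VBUS reading leaves [min, max] is an assumption of this example.
 */
static int example_vbus_event(int mux)
{
	/* react to the auto-trigger for the given mux input */
	return 0;
}

static int example_setup_vbus_monitor(struct ab5500_gpadc *gpadc)
{
	static struct adc_auto_input input = {
		.mux		   = VBUS_V,
		.freq		   = MS500,
		.min		   = 0,
		.max		   = 0xff,
		.auto_adc_callback = example_vbus_event,
	};

	return ab5500_gpadc_convert_auto(gpadc, &input);
}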
diff --git a/include/linux/mfd/abx500/ab5500.h b/include/linux/mfd/abx500/ab5500.h
new file mode 100644
index 00000000000..7c200c483b0
--- /dev/null
+++ b/include/linux/mfd/abx500/ab5500.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) ST-Ericsson BLR 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Bibek Basu <bibek.basu@stericsson.com>
+ */
+#ifndef MFD_AB5500_H
+#define MFD_AB5500_H
+
+#include <linux/device.h>
+
+
+enum ab5500_devid {
+ AB5500_DEVID_ADC,
+ AB5500_DEVID_LEDS,
+ AB5500_DEVID_POWER,
+ AB5500_DEVID_REGULATORS,
+ AB5500_DEVID_SIM,
+ AB5500_DEVID_RTC,
+ AB5500_DEVID_CHARGER,
+ AB5500_DEVID_FG,
+ AB5500_DEVID_VIBRATOR,
+ AB5500_DEVID_CODEC,
+ AB5500_DEVID_USB,
+ AB5500_DEVID_OTP,
+ AB5500_DEVID_VIDEO,
+ AB5500_DEVID_DBIECI,
+ AB5500_DEVID_ONSWA,
+ AB5500_DEVID_CHARGALG,
+ AB5500_DEVID_BTEMP,
+ AB5500_DEVID_TEMPMON,
+ AB5500_DEVID_ACCDET,
+ AB5500_NUM_DEVICES,
+};
+
+enum ab5500_banks {
+ AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP = 0,
+ AB5500_BANK_VDDDIG_IO_I2C_CLK_TST = 1,
+ AB5500_BANK_VDENC = 2,
+ AB5500_BANK_SIM_USBSIM = 3,
+ AB5500_BANK_LED = 4,
+ AB5500_BANK_ADC = 5,
+ AB5500_BANK_RTC = 6,
+ AB5500_BANK_STARTUP = 7,
+ AB5500_BANK_DBI_ECI = 8,
+ AB5500_BANK_CHG = 9,
+ AB5500_BANK_FG_BATTCOM_ACC = 10,
+ AB5500_BANK_USB = 11,
+ AB5500_BANK_IT = 12,
+ AB5500_BANK_VIBRA = 13,
+ AB5500_BANK_AUDIO_HEADSETUSB = 14,
+ AB5500_NUM_BANKS = 15,
+};
+
+enum ab5500_banks_addr {
+ AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP = 0x4A,
+ AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST = 0x4B,
+ AB5500_ADDR_VDENC = 0x06,
+ AB5500_ADDR_SIM_USBSIM = 0x04,
+ AB5500_ADDR_LED = 0x10,
+ AB5500_ADDR_ADC = 0x0A,
+ AB5500_ADDR_RTC = 0x0F,
+ AB5500_ADDR_STARTUP = 0x03,
+ AB5500_ADDR_DBI_ECI = 0x07,
+ AB5500_ADDR_CHG = 0x0B,
+ AB5500_ADDR_FG_BATTCOM_ACC = 0x0C,
+ AB5500_ADDR_USB = 0x05,
+ AB5500_ADDR_IT = 0x0E,
+ AB5500_ADDR_VIBRA = 0x02,
+ AB5500_ADDR_AUDIO_HEADSETUSB = 0x0D,
+};
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB5500_IT_SOURCE0_REG 0x20
+#define AB5500_IT_SOURCE1_REG 0x21
+#define AB5500_IT_SOURCE2_REG 0x22
+#define AB5500_IT_SOURCE3_REG 0x23
+#define AB5500_IT_SOURCE4_REG 0x24
+#define AB5500_IT_SOURCE5_REG 0x25
+#define AB5500_IT_SOURCE6_REG 0x26
+#define AB5500_IT_SOURCE7_REG 0x27
+#define AB5500_IT_SOURCE8_REG 0x28
+#define AB5500_IT_SOURCE9_REG 0x29
+#define AB5500_IT_SOURCE10_REG 0x2A
+#define AB5500_IT_SOURCE11_REG 0x2B
+#define AB5500_IT_SOURCE12_REG 0x2C
+#define AB5500_IT_SOURCE13_REG 0x2D
+#define AB5500_IT_SOURCE14_REG 0x2E
+#define AB5500_IT_SOURCE15_REG 0x2F
+#define AB5500_IT_SOURCE16_REG 0x30
+#define AB5500_IT_SOURCE17_REG 0x31
+#define AB5500_IT_SOURCE18_REG 0x32
+#define AB5500_IT_SOURCE19_REG 0x33
+#define AB5500_IT_SOURCE20_REG 0x34
+#define AB5500_IT_SOURCE21_REG 0x35
+#define AB5500_IT_SOURCE22_REG 0x36
+#define AB5500_IT_SOURCE23_REG 0x37
+#define AB5500_IT_SOURCE24_REG 0x38
+
+#define AB5500_NUM_IRQ_REGS 25
+
+/**
+ * struct ab5500
+ * @access_mutex: lock out concurrent accesses to the AB registers
+ * @dev: a pointer to the device struct for this chip driver
+ * @ab5500_irq: the analog baseband irq
+ * @irq_base: the platform configuration irq base for subdevices
+ * @chip_name: name of this chip variant
+ * @chip_id: 8 bit chip ID for this chip variant
+ * @irq_lock: a lock to protect the mask
+ * @abb_events: a local bit mask of the prcmu wakeup events
+ * @mask: a local copy of the event mask registers
+ * @oldmask: a copy of the last mask written to hardware
+ * @startup_events: a copy of the first reading of the event registers
+ * @startup_events_read: whether the first events have been read
+ */
+struct ab5500 {
+ struct mutex access_mutex;
+ struct device *dev;
+ unsigned int ab5500_irq;
+ unsigned int irq_base;
+ char chip_name[32];
+ u8 chip_id;
+ struct mutex irq_lock;
+ u32 num_event_reg;
+ u32 abb_events;
+ u8 mask[AB5500_NUM_IRQ_REGS];
+ u8 oldmask[AB5500_NUM_IRQ_REGS];
+ u8 startup_events[AB5500_NUM_IRQ_REGS];
+ bool startup_events_read;
+#ifdef CONFIG_DEBUG_FS
+ unsigned int debug_bank;
+ unsigned int debug_address;
+#endif
+};
+
+#ifndef CONFIG_AB5500_CORE
+static inline int ab5500_clock_rtc_enable(int num, bool enable)
+{
+ return -ENOSYS;
+}
+#else
+extern int ab5500_clock_rtc_enable(int num, bool enable);
+#endif
+
+/* Forward Declaration */
+struct ab5500_regulator_platform_data;
+
+struct ab5500_platform_data {
+ struct {unsigned int base; unsigned int count; } irq;
+ void *dev_data[AB5500_NUM_DEVICES];
+ size_t dev_data_sz[AB5500_NUM_DEVICES];
+ struct abx500_init_settings *init_settings;
+ unsigned int init_settings_sz;
+ bool pm_power_off;
+ struct ab5500_regulator_platform_data *regulator;
+ struct ab5500_usbgpio_platform_data *usb;
+ struct abx500_accdet_platform_data *accdet;
+};
+
+struct ab5500_ponkey_platform_data {
+ u8 shutdown_secs;
+};
+#endif /* MFD_AB5500_H */
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
new file mode 100644
index 00000000000..f04e47ff56a
--- /dev/null
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+#define psy_to_ux500_charger(x) container_of((x), \
+ struct ux500_charger, psy)
+
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy: power supply base class
+ * @ops: ux500 charger operations
+ * @max_out_volt: maximum output charger voltage in mV
+ * @max_out_curr: maximum output charger current in mA
+ */
+struct ux500_charger {
+ struct power_supply psy;
+ struct ux500_charger_ops ops;
+ int max_out_volt;
+ int max_out_curr;
+};
+
+#endif
diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h
index 9890687f582..ffbd415e6c7 100644
--- a/include/linux/mfd/db5500-prcmu.h
+++ b/include/linux/mfd/db5500-prcmu.h
@@ -30,7 +30,30 @@ u16 db5500_prcmu_get_reset_code(void);
bool db5500_prcmu_is_ac_wake_requested(void);
int db5500_prcmu_set_arm_opp(u8 opp);
int db5500_prcmu_get_arm_opp(void);
+int db5500_prcmu_set_ape_opp(u8 opp);
+int db5500_prcmu_get_ape_opp(void);
+int db5500_prcmu_set_ddr_opp(u8 opp);
+int db5500_prcmu_get_ddr_opp(void);
+static inline unsigned long prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
+static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+int db5500_prcmu_get_hotdog(void);
+int db5500_prcmu_config_hotdog(u8 threshold);
+int db5500_prcmu_config_hotmon(u8 low, u8 high);
+int db5500_prcmu_start_temp_sense(u16 cycles32k);
+int db5500_prcmu_stop_temp_sense(void);
#else /* !CONFIG_UX500_SOC_DB5500 */
static inline void db5500_prcmu_early_init(void) {}
@@ -50,6 +73,11 @@ static inline int db5500_prcmu_request_clock(u8 clock, bool enable)
return 0;
}
+static inline unsigned long db5500_prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
static inline int db5500_prcmu_set_display_clocks(void)
{
return 0;
@@ -72,6 +100,16 @@ static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state)
static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {}
+static inline long db5500_prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
static inline int prcmu_resetout(u8 resoutn, u8 state)
{
return 0;
@@ -113,6 +151,48 @@ static inline int db5500_prcmu_get_arm_opp(void)
return 0;
}
+static inline int db5500_prcmu_set_ape_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_get_ape_opp(void)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_set_ddr_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_get_ddr_opp(void)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_get_hotdog(void)
+{
+ return -ENOSYS;
+}
+static inline int db5500_prcmu_config_hotdog(u8 threshold)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_config_hotmon(u8 low, u8 high)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_start_temp_sense(u16 cycles32k)
+{
+ return 0;
+}
+static inline int db5500_prcmu_stop_temp_sense(void)
+{
+ return 0;
+}
#endif /* CONFIG_MFD_DB5500_PRCMU */
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 60d27f7bfc1..06623d44948 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -501,24 +501,19 @@ enum romcode_read prcmu_get_rc_p2a(void);
enum ap_pwrst prcmu_get_xp70_current_state(void);
bool prcmu_has_arm_maxopp(void);
bool prcmu_is_u8400(void);
-int prcmu_set_ape_opp(u8 opp);
-int prcmu_get_ape_opp(void);
int prcmu_request_ape_opp_100_voltage(bool enable);
int prcmu_release_usb_wakeup_state(void);
-int prcmu_set_ddr_opp(u8 opp);
-int prcmu_get_ddr_opp(void);
/* NOTE! Use regulator framework instead */
int prcmu_set_hwacc(u16 hw_acc_dev, u8 state);
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
struct prcmu_auto_pm_config *idle);
bool prcmu_is_auto_pm_enabled(void);
-int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
-int prcmu_set_clock_divider(u8 clock, u8 divider);
-int prcmu_config_hotdog(u8 threshold);
-int prcmu_config_hotmon(u8 low, u8 high);
-int prcmu_start_temp_sense(u16 cycles32k);
-int prcmu_stop_temp_sense(void);
+
+int db8500_prcmu_config_hotdog(u8 threshold);
+int db8500_prcmu_config_hotmon(u8 low, u8 high);
+int db8500_prcmu_start_temp_sense(u16 cycles32k);
+int db8500_prcmu_stop_temp_sense(void);
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
@@ -549,6 +544,10 @@ u16 db8500_prcmu_get_reset_code(void);
bool db8500_prcmu_is_ac_wake_requested(void);
int db8500_prcmu_set_arm_opp(u8 opp);
int db8500_prcmu_get_arm_opp(void);
+int db8500_prcmu_set_ape_opp(u8 opp);
+int db8500_prcmu_get_ape_opp(void);
+int db8500_prcmu_set_ddr_opp(u8 opp);
+int db8500_prcmu_get_ddr_opp(void);
#else /* !CONFIG_MFD_DB8500_PRCMU */
@@ -579,12 +578,12 @@ static inline bool prcmu_is_u8400(void)
return false;
}
-static inline int prcmu_set_ape_opp(u8 opp)
+static inline int db8500_prcmu_set_ape_opp(u8 opp)
{
return 0;
}
-static inline int prcmu_get_ape_opp(void)
+static inline int db8500_prcmu_get_ape_opp(void)
{
return APE_100_OPP;
}
@@ -599,12 +598,12 @@ static inline int prcmu_release_usb_wakeup_state(void)
return 0;
}
-static inline int prcmu_set_ddr_opp(u8 opp)
+static inline int db8500_prcmu_set_ddr_opp(u8 opp)
{
return 0;
}
-static inline int prcmu_get_ddr_opp(void)
+static inline int db8500_prcmu_get_ddr_opp(void)
{
return DDR_100_OPP;
}
@@ -613,7 +612,6 @@ static inline int prcmu_set_hwacc(u16 hw_acc_dev, u8 state)
{
return 0;
}
-
static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
struct prcmu_auto_pm_config *idle)
{
@@ -624,32 +622,22 @@ static inline bool prcmu_is_auto_pm_enabled(void)
return false;
}
-static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
-{
- return 0;
-}
-
-static inline int prcmu_set_clock_divider(u8 clock, u8 divider)
-{
- return 0;
-}
-
-static inline int prcmu_config_hotdog(u8 threshold)
+static inline int db8500_prcmu_config_hotdog(u8 threshold)
{
return 0;
}
-static inline int prcmu_config_hotmon(u8 low, u8 high)
+static inline int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
return 0;
}
-static inline int prcmu_start_temp_sense(u16 cycles32k)
+static inline int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
return 0;
}
-static inline int prcmu_stop_temp_sense(void)
+static inline int db8500_prcmu_stop_temp_sense(void)
{
return 0;
}
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index bac942f959c..4cbd6a8aeed 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <asm/mach-types.h>
+#include <linux/err.h>
/* PRCMU Wakeup defines */
enum prcmu_wakeup_index {
@@ -80,6 +80,29 @@ enum prcmu_wakeup_index {
#define EPOD_STATE_ON_CLK_OFF 0x03
#define EPOD_STATE_ON 0x04
+/* DB5500 CLKOUT IDs */
+enum {
+ DB5500_CLKOUT0 = 0,
+ DB5500_CLKOUT1,
+};
+
+/* DB5500 CLKOUTx sources */
+enum {
+ DB5500_CLKOUT_REF_CLK_SEL0,
+ DB5500_CLKOUT_RTC_CLK0_SEL0,
+ DB5500_CLKOUT_ULP_CLK_SEL0,
+ DB5500_CLKOUT_STATIC0,
+ DB5500_CLKOUT_REFCLK,
+ DB5500_CLKOUT_ULPCLK,
+ DB5500_CLKOUT_ARMCLK,
+ DB5500_CLKOUT_SYSACC0CLK,
+ DB5500_CLKOUT_SOC0PLLCLK,
+ DB5500_CLKOUT_SOC1PLLCLK,
+ DB5500_CLKOUT_DDRPLLCLK,
+ DB5500_CLKOUT_TVCLK,
+ DB5500_CLKOUT_IRDACLK,
+};
+
/*
* CLKOUT sources
*/
@@ -111,6 +134,7 @@ enum prcmu_clock {
PRCMU_MSP1CLK,
PRCMU_I2CCLK,
PRCMU_SDMMCCLK,
+ PRCMU_SPARE1CLK,
PRCMU_SLIMCLK,
PRCMU_PER1CLK,
PRCMU_PER2CLK,
@@ -141,6 +165,7 @@ enum prcmu_clock {
PRCMU_SVACLK,
PRCMU_NUM_REG_CLOCKS,
PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
+ PRCMU_CDCLK,
PRCMU_TIMCLK,
PRCMU_PLLSOC0,
PRCMU_PLLSOC1,
@@ -153,12 +178,14 @@ enum prcmu_clock {
* @APE_NO_CHANGE: The APE operating point is unchanged
* @APE_100_OPP: The new APE operating point is ape100opp
* @APE_50_OPP: 50%
+ * @APE_50_PARTLY_25_OPP: 50%, except some clocks at 25%.
*/
enum ape_opp {
APE_OPP_INIT = 0x00,
APE_NO_CHANGE = 0x01,
APE_100_OPP = 0x02,
- APE_50_OPP = 0x03
+ APE_50_OPP = 0x03,
+ APE_50_PARTLY_25_OPP = 0xFF,
};
/**
@@ -218,9 +245,11 @@ enum ddr_pwrst {
#if defined(CONFIG_UX500_SOC_DB8500) || defined(CONFIG_UX500_SOC_DB5500)
+#include <mach/id.h>
+
static inline void __init prcmu_early_init(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_early_init();
else
return db8500_prcmu_early_init();
@@ -229,7 +258,7 @@ static inline void __init prcmu_early_init(void)
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_set_power_state(state, keep_ulp_clk,
keep_ap_pll);
else
@@ -239,15 +268,15 @@ static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
static inline int prcmu_set_epod(u16 epod_id, u8 epod_state)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_epod(epod_id, epod_state);
else
return db8500_prcmu_set_epod(epod_id, epod_state);
}
static inline void prcmu_enable_wakeups(u32 wakeups)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
db5500_prcmu_enable_wakeups(wakeups);
else
db8500_prcmu_enable_wakeups(wakeups);
@@ -260,7 +289,7 @@ static inline void prcmu_disable_wakeups(void)
static inline void prcmu_config_abb_event_readout(u32 abb_events)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
db5500_prcmu_config_abb_event_readout(abb_events);
else
db8500_prcmu_config_abb_event_readout(abb_events);
@@ -268,7 +297,7 @@ static inline void prcmu_config_abb_event_readout(u32 abb_events)
static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
db5500_prcmu_get_abb_event_buffer(buf);
else
db8500_prcmu_get_abb_event_buffer(buf);
@@ -281,36 +310,66 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
static inline int prcmu_request_clock(u8 clock, bool enable)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_request_clock(clock, enable);
else
return db8500_prcmu_request_clock(clock, enable);
}
-int prcmu_set_ape_opp(u8 opp);
-int prcmu_get_ape_opp(void);
-int prcmu_set_ddr_opp(u8 opp);
-int prcmu_get_ddr_opp(void);
+unsigned long prcmu_clock_rate(u8 clock);
+long prcmu_round_clock_rate(u8 clock, unsigned long rate);
+int prcmu_set_clock_rate(u8 clock, unsigned long rate);
+
+static inline int prcmu_set_ddr_opp(u8 opp)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_ddr_opp(opp);
+ else
+ return db8500_prcmu_set_ddr_opp(opp);
+}
+static inline int prcmu_get_ddr_opp(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_ddr_opp();
+ else
+ return db8500_prcmu_get_ddr_opp();
+}
static inline int prcmu_set_arm_opp(u8 opp)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_arm_opp(opp);
else
return db8500_prcmu_set_arm_opp(opp);
}
static inline int prcmu_get_arm_opp(void)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_arm_opp();
else
return db8500_prcmu_get_arm_opp();
}
+static inline int prcmu_set_ape_opp(u8 opp)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_ape_opp(opp);
+ else
+ return db8500_prcmu_set_ape_opp(opp);
+}
+
+static inline int prcmu_get_ape_opp(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_ape_opp();
+ else
+ return db8500_prcmu_get_ape_opp();
+}
+
static inline void prcmu_system_reset(u16 reset_code)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_system_reset(reset_code);
else
return db8500_prcmu_system_reset(reset_code);
@@ -318,7 +377,7 @@ static inline void prcmu_system_reset(u16 reset_code)
static inline u16 prcmu_get_reset_code(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_get_reset_code();
else
return db8500_prcmu_get_reset_code();
@@ -329,7 +388,7 @@ void prcmu_ac_sleep_req(void);
void prcmu_modem_reset(void);
static inline bool prcmu_is_ac_wake_requested(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_is_ac_wake_requested();
else
return db8500_prcmu_is_ac_wake_requested();
@@ -337,7 +396,7 @@ static inline bool prcmu_is_ac_wake_requested(void)
static inline int prcmu_set_display_clocks(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_set_display_clocks();
else
return db8500_prcmu_set_display_clocks();
@@ -345,7 +404,7 @@ static inline int prcmu_set_display_clocks(void)
static inline int prcmu_disable_dsipll(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_disable_dsipll();
else
return db8500_prcmu_disable_dsipll();
@@ -353,7 +412,7 @@ static inline int prcmu_disable_dsipll(void)
static inline int prcmu_enable_dsipll(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_enable_dsipll();
else
return db8500_prcmu_enable_dsipll();
@@ -361,11 +420,44 @@ static inline int prcmu_enable_dsipll(void)
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_esram0_deep_sleep(state);
else
return db8500_prcmu_config_esram0_deep_sleep(state);
}
+
+static inline int prcmu_config_hotdog(u8 threshold)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_hotdog(threshold);
+ else
+ return db8500_prcmu_config_hotdog(threshold);
+}
+
+static inline int prcmu_config_hotmon(u8 low, u8 high)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_hotmon(low, high);
+ else
+ return db8500_prcmu_config_hotmon(low, high);
+}
+
+static inline int prcmu_start_temp_sense(u16 cycles32k)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_start_temp_sense(cycles32k);
+ else
+ return db8500_prcmu_start_temp_sense(cycles32k);
+}
+
+static inline int prcmu_stop_temp_sense(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_stop_temp_sense();
+ else
+ return db8500_prcmu_stop_temp_sense();
+}
+
#else
static inline void __init prcmu_early_init(void) {}
@@ -405,6 +497,21 @@ static inline int prcmu_request_clock(u8 clock, bool enable)
return 0;
}
+static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline unsigned long prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
static inline int prcmu_set_ape_opp(u8 opp)
{
return 0;
@@ -480,14 +587,35 @@ static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
*buf = NULL;
}
+static inline int prcmu_config_hotdog(u8 threshold)
+{
+ return 0;
+}
+
+static inline int prcmu_config_hotmon(u8 low, u8 high)
+{
+ return 0;
+}
+
+static inline int prcmu_start_temp_sense(u16 cycles32k)
+{
+ return 0;
+}
+
+static inline int prcmu_stop_temp_sense(void)
+{
+ return 0;
+}
+
#endif
/* PRCMU QoS APE OPP class */
#define PRCMU_QOS_APE_OPP 1
#define PRCMU_QOS_DDR_OPP 2
+#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_UX500_PRCMU_QOS_POWER
+#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
@@ -500,6 +628,7 @@ int prcmu_qos_add_notifier(int prcmu_qos_class,
struct notifier_block *notifier);
int prcmu_qos_remove_notifier(int prcmu_qos_class,
struct notifier_block *notifier);
+void prcmu_qos_voice_call_override(bool enable);
#else
@@ -543,7 +672,7 @@ static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
{
return 0;
}
-
+static inline void prcmu_qos_voice_call_override(bool enable) {}
#endif
#endif /* __MACH_PRCMU_H */
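[Editorial illustration, not part of the patch] The thermal helpers added above (prcmu_config_hotdog/hotmon and the temp-sense start/stop calls) dispatch to the DB5500 or DB8500 PRCMU implementation depending on the running SoC. A rough usage sketch follows; the threshold values, their unit (assumed here to be degrees Celsius) and the 32 kHz-cycle measurement period are illustrative assumptions only:

static int example_thermal_setup(void)
{
	int ret;

	/* Assumption: alert when temperature drops below 70 or rises above 85 */
	ret = prcmu_config_hotmon(70, 85);
	if (ret)
		return ret;

	/* Assumption: hardware over-temperature (hotdog) threshold */
	ret = prcmu_config_hotdog(95);
	if (ret)
		return ret;

	/* Start periodic measurement; period given in 32 kHz cycles */
	return prcmu_start_temp_sense(0x8000);
}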
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index be1af7c42e5..54046d1d6b5 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -107,7 +107,7 @@ struct matrix_keymap_data;
* @no_autorepeat: disable key autorepeat
*/
struct stmpe_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
+ const struct matrix_keymap_data *keymap_data;
unsigned int debounce_ms;
unsigned int scan_count;
bool no_autorepeat;
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h
new file mode 100644
index 00000000000..8c5385c2191
--- /dev/null
+++ b/include/linux/mfd/tc35892.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ */
+
+#ifndef __LINUX_MFD_TC35892_H
+#define __LINUX_MFD_TC35892_H
+
+#include <linux/device.h>
+
+#define TC35892_RSTCTRL_IRQRST (1 << 4)
+#define TC35892_RSTCTRL_TIMRST (1 << 3)
+#define TC35892_RSTCTRL_ROTRST (1 << 2)
+#define TC35892_RSTCTRL_KBDRST (1 << 1)
+#define TC35892_RSTCTRL_GPIRST (1 << 0)
+
+
+#define TC35892_MANFCODE 0x80
+#define TC35892_MANFCODE_MAGIC 0x03
+#define TC35892_VERSION 0x81
+#define TC35892_RSTCTRL 0x82
+#define TC35892_EXTRSTN 0x83
+#define TC35892_RSTINTCLR 0x84
+#define TC35892_CLKMODE 0x88
+#define TC35892_CLKCFG 0x89
+#define TC35892_CLKEN 0x8A
+#define TC35892_IRQST 0x91
+
+#define TC35892_DRIVE0_L 0xA0
+#define TC35892_DRIVE0_H 0xA1
+#define TC35892_DRIVE1_L 0xA2
+#define TC35892_DRIVE1_H 0xA3
+#define TC35892_DRIVE2_L 0xA4
+#define TC35892_DRIVE2_H 0XA5
+#define TC35892_DRIVE3 0xA6
+#define TC35892_IOCFG 0xA7
+
+#define TC35892_IOPC0_L 0xAA
+#define TC35892_IOPC0_H 0xAB
+#define TC35892_IOPC1_L 0xAC
+#define TC35892_IOPC1_H 0xAD
+#define TC35892_IOPC2_L 0xAE
+#define TC35892_IOPC2_H 0xAF
+
+#define TC35892_GPIODATA0 0xC0
+#define TC35892_GPIOMASK0 0xC1
+#define TC35892_GPIODATA1 0xC2
+#define TC35892_GPIOMASK1 0xC3
+#define TC35892_GPIODATA2 0xC4
+#define TC35892_GPIOMASK2 0xC5
+#define TC35892_GPIODIR0 0xC6
+#define TC35892_GPIODIR1 0xC7
+#define TC35892_GPIODIR2 0xC8
+#define TC35892_GPIOIS0 0xC9
+#define TC35892_GPIOIS1 0xCA
+#define TC35892_GPIOIS2 0xCB
+#define TC35892_GPIOIBE0 0xCC
+#define TC35892_GPIOIBE1 0xCD
+#define TC35892_GPIOIBE2 0xCE
+#define TC35892_GPIOIEV0 0xCF
+#define TC35892_GPIOIEV1 0xD0
+#define TC35892_GPIOIEV2 0xD1
+#define TC35892_GPIOIE0 0xD2
+#define TC35892_GPIOIE1 0xD3
+#define TC35892_GPIOIE2 0xD4
+#define TC35892_GPIORIS0 0xD6
+#define TC35892_GPIORIS1 0xD7
+#define TC35892_GPIORIS2 0xD8
+#define TC35892_GPIOMIS0 0xD9
+#define TC35892_GPIOMIS1 0xDA
+#define TC35892_GPIOMIS2 0xDB
+#define TC35892_GPIOIC0 0xDC
+#define TC35892_GPIOIC1 0xDD
+#define TC35892_GPIOIC2 0xDE
+#define TC35892_GPIOODM0 0xE0
+#define TC35892_GPIOODE0 0xE1
+#define TC35892_GPIOODM1 0xE2
+#define TC35892_GPIOODE1 0xE3
+#define TC35892_GPIOODM2 0xE4
+#define TC35892_GPIOODE2 0xE5
+
+#define TC35892_GPIOSYNC0 0xE6
+#define TC35892_GPIOSYNC1 0xE7
+#define TC35892_GPIOSYNC2 0xE8
+
+#define TC35892_GPIOWAKE0 0xE9
+#define TC35892_GPIOWAKE1 0xEA
+#define TC35892_GPIOWAKE2 0xEB
+
+#define TC35892_INT_GPIIRQ 0
+#define TC35892_INT_TI0IRQ 1
+#define TC35892_INT_TI1IRQ 2
+#define TC35892_INT_TI2IRQ 3
+#define TC35892_INT_ROTIRQ 5
+#define TC35892_INT_KBDIRQ 6
+#define TC35892_INT_PORIRQ 7
+
+#define TC35892_NR_INTERNAL_IRQS 8
+#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x))
+
+struct tc35892 {
+ struct mutex lock;
+ struct device *dev;
+ struct i2c_client *i2c;
+
+ int irq_base;
+ int num_gpio;
+ struct tc35892_platform_data *pdata;
+};
+
+extern int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data);
+extern int tc35892_reg_read(struct tc35892 *tc35892, u8 reg);
+extern int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length,
+ u8 *values);
+extern int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values);
+extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val);
+
+/**
+ * struct tc35892_gpio_platform_data - TC35892 GPIO platform data
+ * @gpio_base: first gpio number assigned to TC35892. A maximum of
+ * %TC35892_NR_GPIOS GPIOs will be allocated.
+ * @setup: callback for board-specific initialization
+ * @remove: callback for board-specific teardown
+ */
+struct tc35892_gpio_platform_data {
+ int gpio_base;
+ void (*setup)(struct tc35892 *tc35892, unsigned gpio_base);
+ void (*remove)(struct tc35892 *tc35892, unsigned gpio_base);
+};
+
+/**
+ * struct tc35892_platform_data - TC35892 platform data
+ * @irq_base: base IRQ number. %TC35892_NR_IRQS irqs will be used.
+ * @gpio: GPIO-specific platform data
+ */
+struct tc35892_platform_data {
+ int irq_base;
+ struct tc35892_gpio_platform_data *gpio;
+};
+
+#define TC35892_NR_GPIOS 24
+#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS)
+
+#endif
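[Editorial illustration, not part of the patch] Board code would typically describe a TC35892 expander through the platform data declared above. A minimal sketch, in which the mop500_ names and the base numbers are hypothetical:

static struct tc35892_gpio_platform_data mop500_tc35892_gpio = {
	.gpio_base	= 256,	/* hypothetical: first GPIO number of the expander */
};

static struct tc35892_platform_data mop500_tc35892 = {
	.irq_base	= 320,	/* hypothetical: TC35892_NR_IRQS IRQs allocated from here */
	.gpio		= &mop500_tc35892_gpio,
};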
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index 16c76e124f9..b8c6a941071 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -31,20 +31,43 @@ enum tx3589x_block {
#define TC3589x_EVTCODE_FIFO 0x10
#define TC3589x_KBDMFS 0x8F
-#define TC3589x_IRQST 0x91
-
-#define TC3589x_MANFCODE_MAGIC 0x03
#define TC3589x_MANFCODE 0x80
+#define TC3589x_MANFCODE_MAGIC 0x03
#define TC3589x_VERSION 0x81
-#define TC3589x_IOCFG 0xA7
+#define TC3589x_RSTCTRL 0x82
+#define TC3589x_EXTRSTN 0x83
+#define TC3589x_RSTINTCLR 0x84
#define TC3589x_CLKMODE 0x88
#define TC3589x_CLKCFG 0x89
#define TC3589x_CLKEN 0x8A
+#define TC3589x_IRQST 0x91
-#define TC3589x_RSTCTRL 0x82
-#define TC3589x_EXTRSTN 0x83
-#define TC3589x_RSTINTCLR 0x84
+#define TC3589x_DRIVE0_L 0xA0
+#define TC3589x_DRIVE0_H 0xA1
+#define TC3589x_DRIVE1_L 0xA2
+#define TC3589x_DRIVE1_H 0xA3
+#define TC3589x_DRIVE2_L 0xA4
+#define TC3589x_DRIVE2_H 0XA5
+#define TC3589x_DRIVE3 0xA6
+#define TC3589x_IOCFG 0xA7
+
+#define TC3589x_IOPC0_L 0xAA
+#define TC3589x_IOPC0_H 0xAB
+#define TC3589x_IOPC1_L 0xAC
+#define TC3589x_IOPC1_H 0xAD
+#define TC3589x_IOPC2_L 0xAE
+#define TC3589x_IOPC2_H 0xAF
+
+#define TC3589x_GPIODATA0 0xC0
+#define TC3589x_GPIOMASK0 0xC1
+#define TC3589x_GPIODATA1 0xC2
+#define TC3589x_GPIOMASK1 0xC3
+#define TC3589x_GPIODATA2 0xC4
+#define TC3589x_GPIOMASK2 0xC5
+#define TC3589x_GPIODIR0 0xC6
+#define TC3589x_GPIODIR1 0xC7
+#define TC3589x_GPIODIR2 0xC8
/* Pull up/down configuration registers */
#define TC3589x_IOCFG 0xA7
@@ -75,17 +98,12 @@ enum tx3589x_block {
#define TC3589x_GPIOIC0 0xDC
#define TC3589x_GPIOIC1 0xDD
#define TC3589x_GPIOIC2 0xDE
-
-#define TC3589x_GPIODATA0 0xC0
-#define TC3589x_GPIOMASK0 0xc1
-#define TC3589x_GPIODATA1 0xC2
-#define TC3589x_GPIOMASK1 0xc3
-#define TC3589x_GPIODATA2 0xC4
-#define TC3589x_GPIOMASK2 0xC5
-
-#define TC3589x_GPIODIR0 0xC6
-#define TC3589x_GPIODIR1 0xC7
-#define TC3589x_GPIODIR2 0xC8
+#define TC3589x_GPIOODM0 0xE0
+#define TC3589x_GPIOODE0 0xE1
+#define TC3589x_GPIOODM1 0xE2
+#define TC3589x_GPIOODE1 0xE3
+#define TC3589x_GPIOODM2 0xE4
+#define TC3589x_GPIOODE2 0xE5
#define TC3589x_GPIOSYNC0 0xE6
#define TC3589x_GPIOSYNC1 0xE7
@@ -95,13 +113,6 @@ enum tx3589x_block {
#define TC3589x_GPIOWAKE1 0xEA
#define TC3589x_GPIOWAKE2 0xEB
-#define TC3589x_GPIOODM0 0xE0
-#define TC3589x_GPIOODE0 0xE1
-#define TC3589x_GPIOODM1 0xE2
-#define TC3589x_GPIOODE1 0xE3
-#define TC3589x_GPIOODM2 0xE4
-#define TC3589x_GPIOODE2 0xE5
-
#define TC3589x_INT_GPIIRQ 0
#define TC3589x_INT_TI0IRQ 1
#define TC3589x_INT_TI1IRQ 2
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index c8ef9bc54d5..4c896602af8 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -71,6 +71,7 @@ struct mmc_ext_csd {
bool hpi_en; /* HPI enable bit */
bool hpi; /* HPI support bit */
unsigned int hpi_cmd; /* cmd used as HPI */
+ unsigned int boot_locked;
u8 raw_partition_support; /* 160 */
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
@@ -177,6 +178,14 @@ struct sdio_func_tuple;
#define MAX_MMC_PART_NAME_LEN 20
/*
+ * Partition area type, boot or gp
+ */
+enum mmc_part_area_type {
+ MMC_BLK_DATA_AREA_BOOT,
+ MMC_BLK_DATA_AREA_GP,
+};
+
+/*
* MMC Physical partitions
*/
struct mmc_part {
@@ -184,6 +193,7 @@ struct mmc_part {
unsigned int part_cfg; /* partition type */
char name[MAX_MMC_PART_NAME_LEN];
bool force_ro; /* to make boot parts RO by default */
+ int area_type;
};
/*
@@ -261,12 +271,14 @@ struct mmc_card {
 * This function fills in the contents of a mmc_part.
*/
static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
- unsigned int part_cfg, char *name, int idx, bool ro)
+ unsigned int part_cfg, char *name, int idx, bool ro,
+ int area_type)
{
card->part[card->nr_parts].size = size;
card->part[card->nr_parts].part_cfg = part_cfg;
sprintf(card->part[card->nr_parts].name, name, idx);
card->part[card->nr_parts].force_ro = ro;
+ card->part[card->nr_parts].area_type = area_type;
card->nr_parts++;
}
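[Editorial illustration, not part of the patch] With the new area_type argument, a read-only boot partition can be registered as sketched below; the size and index are illustrative only:

	mmc_part_add(card, 1024, EXT_CSD_PART_CONFIG_ACC_BOOT0,
		     "boot%d", 0, true, MMC_BLK_DATA_AREA_BOOT);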
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 0e7135697d1..665548e639e 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -280,6 +280,7 @@ struct _mmc_csd {
#define EXT_CSD_RST_N_FUNCTION 162 /* R/W */
#define EXT_CSD_SANITIZE_START 165 /* W */
#define EXT_CSD_WR_REL_PARAM 166 /* RO */
+#define EXT_CSD_BOOT_WP 173 /* R/W */
#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
#define EXT_CSD_PART_CONFIG 179 /* R/W */
#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
@@ -321,6 +322,11 @@ struct _mmc_csd {
#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
+#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
+#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10)
+#define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04)
+#define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01)
+
#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7)
#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1)
#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
diff --git a/include/linux/regulator/ab5500.h b/include/linux/regulator/ab5500.h
new file mode 100644
index 00000000000..2d85b57cae8
--- /dev/null
+++ b/include/linux/regulator/ab5500.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __LINUX_REGULATOR_AB5500_H
+#define __LINUX_REGULATOR_AB5500_H
+
+enum ab5500_regulator_id {
+ AB5500_LDO_G,
+ AB5500_LDO_H,
+ AB5500_LDO_K,
+ AB5500_LDO_L,
+ AB5500_LDO_VDIGMIC,
+ AB5500_LDO_SIM,
+ AB5500_BIAS2,
+ AB5500_NUM_REGULATORS,
+};
+
+struct regulator_init_data;
+
+struct ab5500_regulator_platform_data {
+ struct regulator_init_data *regulator;
+ int num_regulator;
+};
+
+#endif
diff --git a/include/linux/regulator/ab8500-debug.h b/include/linux/regulator/ab8500-debug.h
new file mode 100644
index 00000000000..01655fc7fc1
--- /dev/null
+++ b/include/linux/regulator/ab8500-debug.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __LINUX_MFD_AB8500_REGULATOR_DEBUG_H
+#define __LINUX_MFD_AB8500_REGULATOR_DEBUG_H
+
+#ifdef CONFIG_REGULATOR_AB8500_DEBUG
+/* AB8500 debug force/restore functions */
+void ab8500_regulator_debug_force(void);
+void ab8500_regulator_debug_restore(void);
+#else
+static inline void ab8500_regulator_debug_force(void) {}
+static inline void ab8500_regulator_debug_restore(void) {}
+#endif
+
+#endif
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index 76579f964a2..df1e148da73 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -10,6 +10,8 @@
#ifndef __LINUX_MFD_AB8500_REGULATOR_H
#define __LINUX_MFD_AB8500_REGULATOR_H
+#include <linux/platform_device.h>
+
/* AB8500 regulators */
enum ab8500_regulator_id {
AB8500_LDO_AUX1,
@@ -23,23 +25,28 @@ enum ab8500_regulator_id {
AB8500_LDO_ANAMIC2,
AB8500_LDO_DMIC,
AB8500_LDO_ANA,
+ AB8500_SYSCLKREQ_2,
+ AB8500_SYSCLKREQ_4,
AB8500_NUM_REGULATORS,
};
/* AB8500 register initialization */
struct ab8500_regulator_reg_init {
int id;
+ u8 mask;
u8 value;
};
-#define INIT_REGULATOR_REGISTER(_id, _value) \
- { \
- .id = _id, \
- .value = _value, \
+#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
+ { \
+ .id = _id, \
+ .mask = _mask, \
+ .value = _value, \
}
/* AB8500 registers */
enum ab8500_regulator_reg {
+ AB8500_REGUREQUESTCTRL1,
AB8500_REGUREQUESTCTRL2,
AB8500_REGUREQUESTCTRL3,
AB8500_REGUREQUESTCTRL4,
@@ -56,19 +63,52 @@ enum ab8500_regulator_reg {
AB8500_REGUMISC1,
AB8500_VAUDIOSUPPLY,
AB8500_REGUCTRL1VAMIC,
+ AB8500_VSMPS1REGU,
+ AB8500_VSMPS2REGU,
+ AB8500_VSMPS3REGU, /* NOTE! PRCMU register */
AB8500_VPLLVANAREGU,
AB8500_VREFDDR,
AB8500_EXTSUPPLYREGU,
AB8500_VAUX12REGU,
AB8500_VRF1VAUX3REGU,
+ AB8500_VSMPS1SEL1,
+ AB8500_VSMPS1SEL2,
+ AB8500_VSMPS1SEL3,
+ AB8500_VSMPS2SEL1,
+ AB8500_VSMPS2SEL2,
+ AB8500_VSMPS2SEL3,
+ AB8500_VSMPS3SEL1, /* NOTE! PRCMU register */
+ AB8500_VSMPS3SEL2, /* NOTE! PRCMU register */
AB8500_VAUX1SEL,
AB8500_VAUX2SEL,
AB8500_VRF1VAUX3SEL,
AB8500_REGUCTRL2SPARE,
AB8500_REGUCTRLDISCH,
AB8500_REGUCTRLDISCH2,
- AB8500_VSMPS1SEL1,
AB8500_NUM_REGULATOR_REGISTERS,
};
+/* AB8500 external regulators */
+enum ab8500_ext_regulator_id {
+ AB8500_EXT_SUPPLY3,
+ AB8500_NUM_EXT_REGULATORS,
+};
+
+struct ab8500_regulator_platform_data {
+ int num_reg_init;
+ struct ab8500_regulator_reg_init *reg_init;
+ int num_regulator;
+ struct regulator_init_data *regulator;
+ int num_ext_regulator;
+ struct regulator_init_data *ext_regulator;
+};
+
+#ifdef CONFIG_REGULATOR_AB8500_EXT
+__devinit int ab8500_ext_regulator_init(struct platform_device *pdev);
+__devexit int ab8500_ext_regulator_exit(struct platform_device *pdev);
+#else
+static inline int ab8500_ext_regulator_init(struct platform_device *pdev) { return 0; }
+static inline int ab8500_ext_regulator_exit(struct platform_device *pdev) { return 0; }
+#endif
+
#endif
diff --git a/include/linux/regulator/db5500-prcmu.h b/include/linux/regulator/db5500-prcmu.h
new file mode 100644
index 00000000000..fee68795867
--- /dev/null
+++ b/include/linux/regulator/db5500-prcmu.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * Interface to power domain regulators on DB5500
+ */
+
+#ifndef __DB5500_REGULATOR_H__
+#define __DB5500_REGULATOR_H__
+
+#include <linux/regulator/dbx500-prcmu.h>
+
+/* Number of DB5500 regulators and regulator enumeration */
+enum db5500_regulator_id {
+ DB5500_REGULATOR_VAPE,
+ DB5500_REGULATOR_SWITCH_SGA,
+ DB5500_REGULATOR_SWITCH_HVA,
+ DB5500_REGULATOR_SWITCH_SIA,
+ DB5500_REGULATOR_SWITCH_DISP,
+ DB5500_REGULATOR_SWITCH_ESRAM12,
+ DB5500_NUM_REGULATORS
+};
+
+#endif
diff --git a/include/linux/regulator/dbx500-prcmu.h b/include/linux/regulator/dbx500-prcmu.h
new file mode 100644
index 00000000000..2ecb34c56aa
--- /dev/null
+++ b/include/linux/regulator/dbx500-prcmu.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) ST Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+#ifndef __LINUX_REGULATOR_DBX500_H
+#define __LINUX_REGULATOR_DBX500_H
+
+struct ux500_regulator;
+
+#ifdef CONFIG_REGULATOR
+/*
+ * NOTE! The device will be connected to the correct regulator by this
+ * new framework. A list with connections will match up dev_name(dev)
+ * to the specific regulator. This follows the same principle as the
+ * normal regulator framework.
+ *
+ * This framework shall only be used in special cases when a regulator
+ * has to be enabled/disabled in atomic context.
+ */
+
+/**
+ * ux500_regulator_get()
+ *
+ * @dev: The driver's device struct
+ *
+ * Returns a ux500_regulator struct to be used as the argument for
+ * ux500_regulator_atomic_enable/disable calls, or ERR_PTR(-EINVAL)
+ * if no matching regulator is found.
+ */
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev);
+
+/**
+ * ux500_regulator_atomic_enable()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ *
+ * The enable/disable functions keep an internal counter, so every
+ * enable must be paired with a disable in order to turn the regulator off.
+ */
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator);
+
+/**
+ * ux500_regulator_atomic_disable()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ *
+ */
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator);
+
+/**
+ * ux500_regulator_put()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ */
+void ux500_regulator_put(struct ux500_regulator *regulator);
+
+#else
+
+static inline struct ux500_regulator *__must_check
+ux500_regulator_get(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int
+ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline int
+ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+}
+#endif /* CONFIG_REGULATOR */
+
+#ifdef CONFIG_REGULATOR_DEBUG
+void ux500_regulator_suspend_debug(void);
+void ux500_regulator_resume_debug(void);
+#else
+static inline void ux500_regulator_suspend_debug(void) { }
+static inline void ux500_regulator_resume_debug(void) { }
+#endif
+
+#endif
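[Editorial illustration, not part of the patch] A minimal sketch of the atomic regulator interface declared above; the foo_ names and probe/remove structure are hypothetical. A client obtains a handle once and then pairs every enable with a disable:

static struct ux500_regulator *foo_regulator;

static int foo_probe(struct device *dev)
{
	foo_regulator = ux500_regulator_get(dev);
	if (IS_ERR(foo_regulator))
		return PTR_ERR(foo_regulator);
	return 0;
}

static void foo_run(void)
{
	/* May be called from atomic context */
	ux500_regulator_atomic_enable(foo_regulator);
	/* ... access the hardware ... */
	ux500_regulator_atomic_disable(foo_regulator);	/* pairs with the enable */
}

static void foo_remove(void)
{
	ux500_regulator_put(foo_regulator);
}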
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
new file mode 100644
index 00000000000..05e5529a6aa
--- /dev/null
+++ b/include/linux/sys_soc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin-nonst@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#ifndef __SYS_SOC_H
+#define __SYS_SOC_H
+
+#include <linux/kobject.h>
+
+/**
+ * struct sysfs_soc_info - SoC-related information to export
+ * @info: pointer to the value to export
+ * @get_info: callback used to retrieve the value when @info is NULL
+ * @attr: the export's sysfs attribute
+ */
+struct sysfs_soc_info {
+ const char *info;
+ ssize_t (*get_info)(char *buf, struct sysfs_soc_info *);
+ struct kobj_attribute attr;
+};
+
+ssize_t show_soc_info(struct kobject *, struct kobj_attribute *, char *);
+
+#define SYSFS_SOC_ATTR_VALUE(_name, _value) { \
+ .attr.attr.name = _name, \
+ .attr.attr.mode = S_IRUGO, \
+ .attr.show = show_soc_info, \
+ .info = _value, \
+}
+
+#define SYSFS_SOC_ATTR_CALLBACK(_name, _callback) { \
+ .attr.attr.name = _name, \
+ .attr.attr.mode = S_IRUGO, \
+ .attr.show = show_soc_info, \
+ .get_info = _callback, \
+}
+
+/**
+ * register_sysfs_soc - register the SoC information
+ * @info: pointer to the info table to export
+ * @num: number of entries in the table
+ *
+ * NOTE: This function must only be called once
+ */
+int register_sysfs_soc(struct sysfs_soc_info *info, size_t num);
+
+#endif /* __SYS_SOC_H */
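[Editorial illustration, not part of the patch] A short sketch of how a platform could use this interface; the attribute names, values and the callback are made up for illustration:

static ssize_t show_soc_family(char *buf, struct sysfs_soc_info *info)
{
	return sprintf(buf, "ST-Ericsson Ux500\n");
}

static struct sysfs_soc_info soc_info[] = {
	SYSFS_SOC_ATTR_VALUE("machine", "U8500"),
	SYSFS_SOC_ATTR_CALLBACK("family", show_soc_family),
};

static int __init board_register_soc(void)
{
	/* register_sysfs_soc() must only be called once */
	return register_sysfs_soc(soc_info, ARRAY_SIZE(soc_info));
}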
diff --git a/include/linux/tee.h b/include/linux/tee.h
new file mode 100644
index 00000000000..8b71224ac77
--- /dev/null
+++ b/include/linux/tee.h
@@ -0,0 +1,312 @@
+/*
+ * Trusted Execution Environment (TEE) interface for TrustZone enabled ARM CPUs.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * Author: Martin Hovang <martin.xm.hovang@stericsson.com>
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef TEE_H
+#define TEE_H
+
+/* tee_cmd id values */
+#define TEED_OPEN_SESSION 0x00000000U
+#define TEED_CLOSE_SESSION 0x00000001U
+#define TEED_INVOKE 0x00000002U
+
+/* tee_retval id values */
+#define TEED_SUCCESS 0x00000000U
+#define TEED_ERROR_GENERIC 0xFFFF0000U
+#define TEED_ERROR_ACCESS_DENIED 0xFFFF0001U
+#define TEED_ERROR_CANCEL 0xFFFF0002U
+#define TEED_ERROR_ACCESS_CONFLICT 0xFFFF0003U
+#define TEED_ERROR_EXCESS_DATA 0xFFFF0004U
+#define TEED_ERROR_BAD_FORMAT 0xFFFF0005U
+#define TEED_ERROR_BAD_PARAMETERS 0xFFFF0006U
+#define TEED_ERROR_BAD_STATE 0xFFFF0007U
+#define TEED_ERROR_ITEM_NOT_FOUND 0xFFFF0008U
+#define TEED_ERROR_NOT_IMPLEMENTED 0xFFFF0009U
+#define TEED_ERROR_NOT_SUPPORTED 0xFFFF000AU
+#define TEED_ERROR_NO_DATA 0xFFFF000BU
+#define TEED_ERROR_OUT_OF_MEMORY 0xFFFF000CU
+#define TEED_ERROR_BUSY 0xFFFF000DU
+#define TEED_ERROR_COMMUNICATION 0xFFFF000EU
+#define TEED_ERROR_SECURITY 0xFFFF000FU
+#define TEED_ERROR_SHORT_BUFFER 0xFFFF0010U
+
+/* TEE origin codes */
+#define TEED_ORIGIN_DRIVER 0x00000002U
+#define TEED_ORIGIN_TEE 0x00000003U
+#define TEED_ORIGIN_TEE_APPLICATION 0x00000004U
+
+#define TEE_UUID_CLOCK_SIZE 8
+
+#define TEEC_CONFIG_PAYLOAD_REF_COUNT 4
+
+/*
+ * Flag constants indicating which of the memory references in an open session
+ * or invoke command operation payload (TEEC_Operation) that are used.
+ */
+#define TEEC_MEMREF_0_USED 0x00000001
+#define TEEC_MEMREF_1_USED 0x00000002
+#define TEEC_MEMREF_2_USED 0x00000004
+#define TEEC_MEMREF_3_USED 0x00000008
+
+/*
+ * Flag constants indicating the data transfer direction of memory in
+ * TEEC_SharedMemory and TEEC_MemoryReference. TEEC_MEM_INPUT signifies data
+ * transfer direction from the client application to the TEE. TEEC_MEM_OUTPUT
+ * signifies data transfer direction from the TEE to the client application.
+ */
+#define TEEC_MEM_INPUT 0x00000001
+#define TEEC_MEM_OUTPUT 0x00000002
+
+/*
+ * Session login methods, for use in TEEC_OpenSession() as parameter
+ * connectionMethod. Type is t_uint32.
+ *
+ * TEEC_LOGIN_PUBLIC No login data is provided.
+ */
+#define TEEC_LOGIN_PUBLIC 0x0
+
+/*
+ * Exposed functions (command_id) in the static TA
+ */
+#define TEE_STA_GET_PRODUCT_CONFIG 10
+#define TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER 11
+
+/* Flags indicating run-time environment */
+#define TEE_RT_FLAGS_NORMAL 0x00000000
+#define TEE_RT_FLAGS_MASK_ITP_PROD 0x00000001
+#define TEE_RT_FLAGS_MODEM_DEBUG 0x00000002
+#define TEE_RT_FLAGS_RNG_REG_PUBLIC 0x00000004
+#define TEE_RT_FLAGS_JTAG_ENABLED 0x00000008
+
+/*
+ * Product id numbers
+ */
+#define TEE_PRODUCT_ID_UNKNOWN 0
+#define TEE_PRODUCT_ID_8400 1
+#define TEE_PRODUCT_ID_8500 2
+#define TEE_PRODUCT_ID_9500 3
+#define TEE_PRODUCT_ID_5500 4
+#define TEE_PRODUCT_ID_7400 5
+#define TEE_PRODUCT_ID_8500C 6
+
+/* Flags indicating fuses */
+#define TEE_FUSE_FLAGS_MODEM_DISABLE 0x00000001
+
+/**
+ * struct tee_product_config - System configuration structure.
+ *
+ * @product_id: Product identification.
+ * @rt_flags: Runtime configuration flags.
+ * @fuse_flags: Fuse flags.
+ *
+ */
+struct tee_product_config {
+ uint32_t product_id;
+ uint32_t rt_flags;
+ uint32_t fuse_flags;
+};
+
+/**
+ * struct tee_uuid - Structure that represent an uuid.
+ * @timeLow: The low field of the time stamp.
+ * @timeMid: The middle field of the time stamp.
+ * @timeHiAndVersion: The high field of the timestamp multiplexed
+ * with the version number.
+ * @clockSeqAndNode: The clock sequence and the node.
+ *
+ * This structure has different naming (camel case) to comply with the Global
+ * Platform TEE Client API spec. This type is defined in RFC 4122.
+ */
+struct tee_uuid {
+ uint32_t timeLow;
+ uint16_t timeMid;
+ uint16_t timeHiAndVersion;
+ uint8_t clockSeqAndNode[TEE_UUID_CLOCK_SIZE];
+};
+
+/**
+ * struct tee_sharedmemory - Shared memory block for TEE.
+ * @buffer: The in/out data to TEE.
+ * @size: The size of the data.
+ * @flags: Flags telling whether it is an in, out or in/out parameter.
+ */
+struct tee_sharedmemory {
+ void *buffer;
+ size_t size;
+ uint32_t flags;
+};
+
+/**
+ * struct tee_operation - Payload for sessions or invoke operation.
+ * @shm: Array containing the shared memory buffers.
+ * @flags: Flags telling which of the memory buffers are in use.
+ */
+struct tee_operation {
+ struct tee_sharedmemory shm[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+ uint32_t flags;
+};
+
+struct tee_context {};
+
+/**
+ * struct tee_session - The session of an open tee device.
+ * @state: The current state in the linux kernel.
+ * @err: Error code (as in Global Platform TEE Client API spec)
+ * @origin: Origin for the error code (also from spec).
+ * @id: Implementation defined type, 0 if not used.
+ * @ta: The trusted application.
+ * @uuid: The uuid for the trusted application.
+ * @cmd: The command to be executed in the trusted application.
+ * @driver_cmd: The command type in the driver. This is used from a client
+ * (user space) to tell the Linux kernel whether it is an open-session,
+ * close-session or invoke command.
+ * @ta_size: The size of the trusted application.
+ * @op: The payload for the trusted application.
+ * @sync: Mutex to handle multiple use of clients.
+ *
+ * This structure is mainly used in the Linux kernel as a session context for
+ * ongoing operations. It is also used in the communication with user space.
+ */
+struct tee_session {
+ uint32_t state;
+ uint32_t err;
+ uint32_t origin;
+ uint32_t id;
+ void *ta;
+ struct tee_uuid *uuid;
+ unsigned int cmd;
+ unsigned int driver_cmd;
+ unsigned int ta_size;
+ struct tee_operation *op;
+ struct mutex *sync;
+};
+
+/**
+ * struct tee_read - Contains the error message and the origin.
+ * @err: Error code (as in Global Platform TEE Client API spec)
+ * @origin: Origin for the error code (also from spec).
+ *
+ * This is used by user space when a user space application wants to get more
+ * information about an error.
+ */
+struct tee_read {
+ unsigned int err; /* return value */
+ unsigned int origin; /* error origin */
+};
+
+/**
+ * Function that handles the function calls to trusted applications.
+ * @param ts: The session of an operation to be executed.
+ * @param sec_cmd: The type of command to be executed: open-session,
+ * close-session or invoke command.
+ */
+int call_sec_world(struct tee_session *ts, int sec_cmd);
+
+
+/**
+ * teec_initialize_context() - Initializes a context holding connection
+ * information on the specific TEE.
+ * @param name: A zero-terminated string identifying the TEE to connect to.
+ * If name is set to NULL, the default TEE is connected to.
+ * NULL is the only supported value in this version of the
+ * API implementation.
+ * @param context: The context structure which is to be initialized.
+ *
+ * Initializes a context holding connection information between the calling
+ * client application and the TEE designated by the name string.
+ */
+int teec_initialize_context(const char *name, struct tee_context *context);
+
+/**
+ * teec_finalize_context() - Destroys a context holding connection information
+ * on the specific TEE.
+ * @param context: The context to be destroyed.
+ *
+ * This function destroys an initialized TEE context, closing the connection
+ * between the client application and the TEE. This function must only be
+ * called when all sessions related to this TEE context have been closed and
+ * all shared memory blocks have been released.
+ */
+int teec_finalize_context(struct tee_context *context);
+
+/**
+ * teec_open_session() - Opens a new session with the specified trusted
+ * application.
+ * @param context: The initialized TEE context structure in which scope to
+ * open the session.
+ * @param session: The session to initialize.
+ * @param destination: A structure identifying the trusted application with
+ * which to open a session. If this is set to NULL the
+ * operation TEEC_MEMREF_0 is expected to contain the blob
+ * which holds the Trusted Application.
+ * @param connection_method: The connection method to use.
+ * @param connection_data: Any data necessary to connect with the chosen
+ * connection method. Not supported; must be set to
+ * NULL.
+ * @param operation: An operation structure to use in the session. May be
+ * set to NULL to signify no operation structure needed.
+ * If destination is set to NULL, TEEC_MEMREF_0 is
+ * expected to hold the TA binary as described above.
+ * @param error_origin: A parameter which will hold the error origin if this
+ * function returns any value other than TEEC_SUCCESS.
+ *
+ * Opens a new session with the specified trusted application. Only
+ * connectionMethod == TEEC_LOGIN_PUBLIC is supported. connectionData and
+ * operation shall be set to NULL.
+ */
+int teec_open_session(struct tee_context *context, struct tee_session *session,
+ const struct tee_uuid *destination,
+ unsigned int connection_method,
+ void *connection_data, struct tee_operation *operation,
+ unsigned int *error_origin);
+
+/**
+ * teec_close_session() - Closes the session which has been opened with the
+ * specific trusted application.
+ * @param session: The opened session to close.
+ *
+ * Closes the session which has been opened with the specific trusted
+ * application.
+ */
+int teec_close_session(struct tee_session *session);
+
+/**
+ * teec_invoke_command() - Executes a command in the specified trusted
+ * application.
+ * @param destination: A structure identifying the trusted application.
+ * @param command_id: Identifier of the command in the trusted application to
+ * invoke.
+ * @param operation: An operation structure to use in the invoke command. May
+ * be set to NULL to signify no operation structure needed.
+ * @param error_origin: A parameter which will hold the error origin if this
+ * function returns any value other than TEEC_SUCCESS.
+ *
+ * Executes a command in the specified trusted application.
+ */
+int teec_invoke_command(struct tee_session *session, unsigned int command_id,
+ struct tee_operation *operation,
+ unsigned int *error_origin);
+
+/**
+ * teec_allocate_shared_memory() - Allocate shared memory for TEE.
+ * @param context: The initialized TEE context structure in whose scope the
+ * memory is allocated.
+ * @param shared_memory: Pointer to the allocated shared memory.
+ */
+int teec_allocate_shared_memory(struct tee_context *context,
+ struct tee_sharedmemory *shared_memory);
+
+/**
+ * teec_release_shared_memory() - Free the shared memory.
+ * @param shared_memory: Pointer to the shared memory to be freed.
+ */
+void teec_release_shared_memory(struct tee_sharedmemory *shared_memory);
+
+#endif
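[Editorial illustration, not part of the patch] A rough in-kernel sketch of the session life cycle documented above; the UUID, the command id and the zero-initialization are assumptions, and error handling is abbreviated:

static int example_tee_call(const struct tee_uuid *ta_uuid, unsigned int cmd)
{
	struct tee_context ctx = {};
	struct tee_session sess = {};
	unsigned int origin;
	int ret;

	ret = teec_initialize_context(NULL, &ctx);
	if (ret)
		return ret;

	ret = teec_open_session(&ctx, &sess, ta_uuid, TEEC_LOGIN_PUBLIC,
				NULL, NULL, &origin);
	if (!ret) {
		ret = teec_invoke_command(&sess, cmd, NULL, &origin);
		teec_close_session(&sess);
	}

	teec_finalize_context(&ctx);
	return ret;
}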
diff --git a/include/linux/usb.h b/include/linux/usb.h
index d3d0c137433..cd39382e2f3 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -372,7 +372,15 @@ struct usb_bus {
* limit. Because the arrays need to add a bit for hub status data, we
* do 31, so plus one evens out to four bytes.
*/
+
+#ifdef CONFIG_ARCH_U8500
+/* On the U8500 platform only 16 ports are supported */
+#define USB_MAXCHILDREN (16)
+#else
#define USB_MAXCHILDREN (31)
+#endif
struct usb_tt;
diff --git a/include/trace/Kbuild b/include/trace/Kbuild
new file mode 100644
index 00000000000..7e8b704d610
--- /dev/null
+++ b/include/trace/Kbuild
@@ -0,0 +1 @@
+header-y += stm.h
diff --git a/include/trace/stm.h b/include/trace/stm.h
new file mode 100644
index 00000000000..72e9136c25e
--- /dev/null
+++ b/include/trace/stm.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson STM Trace driver
+ *
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Philippe Langlais <philippe.langlais@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef STM_H
+#define STM_H
+
+#define STM_DEV_NAME "stm"
+
+/* One single channel mapping */
+struct stm_channel {
+ union {
+ __u8 no_stamp8;
+ __u16 no_stamp16;
+ __u32 no_stamp32;
+ __u64 no_stamp64;
+ };
+ union {
+ __u8 stamp8;
+ __u16 stamp16;
+ __u32 stamp32;
+ __u64 stamp64;
+ };
+};
+
+/* Possible trace modes */
+#define STM_SW_LOSSLESS 0 /* Software mode: lossless data but intrusive */
+#define STM_HW_LOSSY 1 /* Hardware mode: lossy data but less intrusive */
+
+/* Possible clock setting */
+enum clock_div {
+ STM_CLOCK_DIV2 = 0,
+ STM_CLOCK_DIV4,
+ STM_CLOCK_DIV6,
+ STM_CLOCK_DIV8,
+ STM_CLOCK_DIV10,
+ STM_CLOCK_DIV12,
+ STM_CLOCK_DIV14,
+ STM_CLOCK_DIV16,
+};
+
+/* ioctl commands */
+#define STM_CONNECTION _IOW('t', 0, enum stm_connection_type)
+#define STM_DISABLE _IO('t', 1)
+#define STM_GET_NB_MAX_CHANNELS _IOR('t', 2, int)
+#define STM_GET_NB_FREE_CHANNELS _IOR('t', 3, int)
+#define STM_GET_CHANNEL_NO _IOR('t', 4, int)
+#define STM_SET_CLOCK_DIV _IOW('t', 5, enum clock_div)
+#define STM_GET_CTRL_REG _IOR('t', 6, int)
+#define STM_ENABLE_SRC _IOWR('t', 7, int)
+#define STM_GET_FREE_CHANNEL _IOW('t', 8, int)
+#define STM_RELEASE_CHANNEL _IOW('t', 9, int)
+#define STM_SET_MODE _IOWR('t', 10, int)
+#define STM_GET_MODE _IOR('t', 11, int)
+
+enum stm_connection_type {
+ STM_DISCONNECT = 0,
+ STM_DEFAULT_CONNECTION = 1,
+ STM_STE_MODEM_ON_MIPI34_NONE_ON_MIPI60 = 2,
+ STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60 = 3,
+ STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60 = 4
+};
+
+#ifdef __KERNEL__
+
+struct stm_platform_data {
+ u32 regs_phys_base;
+ u32 channels_phys_base;
+ u32 id_mask;
+ u32 masters_enabled;
+ const s16 *channels_reserved;
+ int channels_reserved_sz;
+ int (*stm_connection)(enum stm_connection_type);
+};
+
+/* Channels base address */
+extern volatile struct stm_channel __iomem *stm_channels;
+
+/* Provides stm_trace_XX() and stm_tracet_XX() trace API */
+#define DEFLLTFUN(size) \
+static inline void stm_trace_##size(int channel, __u##size data) \
+{ \
+ stm_channels[channel].no_stamp##size = data; \
+} \
+static inline void stm_tracet_##size(int channel, __u##size data) \
+{ \
+ stm_channels[channel].stamp##size = data; \
+} \
+
+DEFLLTFUN(8);
+DEFLLTFUN(16);
+DEFLLTFUN(32);
+DEFLLTFUN(64);
+
+/*
+ * Trace a buffer on a given channel
+ * with auto time stamping on the last byte(s) only
+ */
+int stm_trace_buffer_onchannel(int channel, const void *data, size_t length);
+/*
+ * Trace a buffer on a dynamically allocated channel
+ * with auto time stamping on the last byte(s) only
+ * Dynamic channels are allocated among the 128 highest channels
+ */
+int stm_trace_buffer(const void *data, size_t length);
+
+/* printk equivalent for STM */
+int stm_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+#if defined(CONFIG_STM_PRINTK)
+#define stm_dup_printk(buf, length) \
+ stm_trace_buffer_onchannel(CONFIG_STM_PRINTK_CHANNEL, buf, length)
+
+#else
+static inline int stm_dup_printk(char *buf, size_t size)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_STM_TRACE_PRINTK)
+static inline int stm_trace_printk_buf(
+ unsigned long ip, const char *buf, size_t size)
+{
+ stm_trace_32(CONFIG_STM_TRACE_PRINTK_CHANNEL, ip);
+ return stm_trace_buffer_onchannel(CONFIG_STM_TRACE_PRINTK_CHANNEL,
+ buf, size);
+}
+
+static inline int stm_trace_bprintk_buf(
+ unsigned long ip, const char *fmt, const void *buf, size_t size)
+{
+ stm_trace_64(CONFIG_STM_TRACE_BPRINTK_CHANNEL, ((u64)ip<<32)+(u32)fmt);
+ return stm_trace_buffer_onchannel(CONFIG_STM_TRACE_PRINTK_CHANNEL,
+ buf, size);
+}
+#else
+static inline int stm_trace_printk_buf(
+ unsigned long ip, const char *buf, size_t size)
+{
+ return 0;
+}
+
+static inline int stm_trace_bprintk_buf(
+ unsigned long ip, const char *fmt, const void *buf, size_t size)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_STM_FTRACE)
+static inline void stm_ftrace(unsigned long ip, unsigned long parent_ip)
+{
+ stm_tracet_64(CONFIG_STM_FTRACE_CHANNEL, (((__u64)ip)<<32) + parent_ip);
+}
+#else
+static inline void stm_ftrace(unsigned long ip, unsigned long parent_ip)
+{
+}
+#endif
+
+#if defined(CONFIG_STM_CTX_SWITCH)
+static inline void stm_sched_switch(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+ stm_trace_64(CONFIG_STM_CTX_SWITCH_CHANNEL,
+ (((__u64)prev_pid)<<32) + next_pid);
+ stm_tracet_64(CONFIG_STM_CTX_SWITCH_CHANNEL, (((__u64)next_cpu)<<32)
+ + (prev_prio<<24) + (prev_state<<16)
+ + (next_prio<<8) + next_state);
+}
+#else
+static inline void stm_sched_switch(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_STM_WAKEUP)
+static inline void stm_sched_wakeup(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+ stm_trace_64(CONFIG_STM_WAKEUP_CHANNEL,
+ (((__u64)prev_pid)<<32) + next_pid);
+ stm_tracet_64(CONFIG_STM_WAKEUP_CHANNEL, (((__u64)next_cpu)<<32)
+ + (prev_prio<<24) + (prev_state<<16)
+ + (next_prio<<8) + next_state);
+}
+#else
+static inline void stm_sched_wakeup(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_STM_STACK_TRACE)
+static inline void stm_stack_trace(unsigned long *callers)
+{
+ while (*(callers + 1) != ULONG_MAX) {
+ stm_trace_32(CONFIG_STM_STACK_TRACE_CHANNEL, *callers++);
+ }
+ /* Time stamp the latest */
+ stm_tracet_32(CONFIG_STM_STACK_TRACE_CHANNEL, *callers);
+}
+#else
+static inline void stm_stack_trace(unsigned long *callers)
+{
+}
+#endif
+
+/* Alloc/Free STM channel */
+int stm_alloc_channel(int offset);
+void stm_free_channel(int channel);
+
+#endif /* __KERNEL__ */
+
+#endif /* STM_H */
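[Editorial illustration, not part of the patch] As an illustration of the trace API above, where channel number 42 is arbitrary and a non-negative return from stm_alloc_channel() is assumed to indicate success:

static void example_stm_trace(void)
{
	int ch;

	/* Fixed channel, 32-bit word, hardware time stamp on the write */
	stm_tracet_32(42, 0xcafebabe);

	/* Dynamically allocated channel for a whole buffer */
	ch = stm_alloc_channel(0);
	if (ch >= 0) {
		stm_trace_buffer_onchannel(ch, "hello", 5);
		stm_free_channel(ch);
	}
}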
diff --git a/include/video/av8100.h b/include/video/av8100.h
new file mode 100644
index 00000000000..23e96a0b871
--- /dev/null
+++ b/include/video/av8100.h
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * AV8100 driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __AV8100__H__
+#define __AV8100__H__
+
+#define AV8100_CEC_MESSAGE_SIZE 16
+#define AV8100_HDCP_SEND_KEY_SIZE 16
+#define AV8100_INFOFRAME_SIZE 28
+#define AV8100_FUSE_KEY_SIZE 16
+#define AV8100_CHIPVER_1 1
+#define AV8100_CHIPVER_2 2
+
+struct av8100_platform_data {
+ unsigned gpio_base;
+ int irq;
+ int reset;
+ const char *inputclk_id;
+ const char *regulator_pwr_id;
+ bool alt_powerupseq;
+ unsigned char mclk_freq;
+};
+
+enum av8100_command_type {
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT = 0x1,
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ AV8100_COMMAND_VIDEO_SCALING_FORMAT,
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ AV8100_COMMAND_DENC,
+ AV8100_COMMAND_HDMI,
+ AV8100_COMMAND_HDCP_SENDKEY,
+ AV8100_COMMAND_HDCP_MANAGEMENT,
+ AV8100_COMMAND_INFOFRAMES,
+ AV8100_COMMAND_EDID_SECTION_READBACK,
+ AV8100_COMMAND_PATTERNGENERATOR,
+ AV8100_COMMAND_FUSE_AES_KEY,
+};
+
+enum interface_type {
+ I2C_INTERFACE = 0x0,
+ DSI_INTERFACE = 0x1,
+};
+
+enum av8100_dsi_mode {
+ AV8100_HDMI_DSI_OFF,
+ AV8100_HDMI_DSI_COMMAND_MODE,
+ AV8100_HDMI_DSI_VIDEO_MODE
+};
+
+enum av8100_pixel_format {
+ AV8100_INPUT_PIX_RGB565,
+ AV8100_INPUT_PIX_RGB666,
+ AV8100_INPUT_PIX_RGB666P,
+ AV8100_INPUT_PIX_RGB888,
+ AV8100_INPUT_PIX_YCBCR422
+};
+
+enum av8100_video_mode {
+ AV8100_VIDEO_INTERLACE,
+ AV8100_VIDEO_PROGRESSIVE
+};
+
+enum av8100_dsi_nb_data_lane {
+ AV8100_DATA_LANES_USED_0,
+ AV8100_DATA_LANES_USED_1,
+ AV8100_DATA_LANES_USED_2,
+ AV8100_DATA_LANES_USED_3,
+ AV8100_DATA_LANES_USED_4
+};
+
+enum av8100_te_config {
+ AV8100_TE_OFF, /* NO TE*/
+ AV8100_TE_DSI_LANE, /* TE generated on DSI lane */
+ AV8100_TE_IT_LINE, /* TE generated on IT line (GPIO) */
+ AV8100_TE_DSI_IT, /* TE generated on both DSI lane & IT line */
+ AV8100_TE_GPIO_IT /* TE on GPIO I2S DAT3 and/or IT line */
+};
+
+enum av8100_audio_if_format {
+ AV8100_AUDIO_I2S_MODE,
+ AV8100_AUDIO_I2SDELAYED_MODE, /* I2S Mode by default*/
+ AV8100_AUDIO_TDM_MODE /* 8 Channels by default*/
+};
+
+enum av8100_sample_freq {
+ AV8100_AUDIO_FREQ_32KHZ,
+ AV8100_AUDIO_FREQ_44_1KHZ,
+ AV8100_AUDIO_FREQ_48KHZ,
+ AV8100_AUDIO_FREQ_64KHZ,
+ AV8100_AUDIO_FREQ_88_2KHZ,
+ AV8100_AUDIO_FREQ_96KHZ,
+ AV8100_AUDIO_FREQ_128KHZ,
+ AV8100_AUDIO_FREQ_176_1KHZ,
+ AV8100_AUDIO_FREQ_192KHZ
+};
+
+enum av8100_audio_word_length {
+ AV8100_AUDIO_16BITS,
+ AV8100_AUDIO_20BITS,
+ AV8100_AUDIO_24BITS
+};
+
+enum av8100_audio_format {
+ AV8100_AUDIO_LPCM_MODE,
+ AV8100_AUDIO_COMPRESS_MODE
+};
+
+enum av8100_audio_if_mode {
+ AV8100_AUDIO_SLAVE,
+ AV8100_AUDIO_MASTER
+};
+
+enum av8100_audio_mute {
+ AV8100_AUDIO_MUTE_DISABLE,
+ AV8100_AUDIO_MUTE_ENABLE
+};
+
+enum av8100_output_CEA_VESA {
+ AV8100_CUSTOM,
+ AV8100_CEA1_640X480P_59_94HZ,
+ AV8100_CEA2_3_720X480P_59_94HZ,
+ AV8100_CEA4_1280X720P_60HZ,
+ AV8100_CEA5_1920X1080I_60HZ,
+ AV8100_CEA6_7_NTSC_60HZ,
+ AV8100_CEA14_15_480p_60HZ,
+ AV8100_CEA16_1920X1080P_60HZ,
+ AV8100_CEA17_18_720X576P_50HZ,
+ AV8100_CEA19_1280X720P_50HZ,
+ AV8100_CEA20_1920X1080I_50HZ,
+ AV8100_CEA21_22_576I_PAL_50HZ,
+ AV8100_CEA29_30_576P_50HZ,
+ AV8100_CEA31_1920x1080P_50Hz,
+ AV8100_CEA32_1920X1080P_24HZ,
+ AV8100_CEA33_1920X1080P_25HZ,
+ AV8100_CEA34_1920X1080P_30HZ,
+ AV8100_CEA60_1280X720P_24HZ,
+ AV8100_CEA61_1280X720P_25HZ,
+ AV8100_CEA62_1280X720P_30HZ,
+ AV8100_VESA9_800X600P_60_32HZ,
+ AV8100_VESA14_848X480P_60HZ,
+ AV8100_VESA16_1024X768P_60HZ,
+ AV8100_VESA22_1280X768P_59_99HZ,
+ AV8100_VESA23_1280X768P_59_87HZ,
+ AV8100_VESA27_1280X800P_59_91HZ,
+ AV8100_VESA28_1280X800P_59_81HZ,
+ AV8100_VESA39_1360X768P_60_02HZ,
+ AV8100_VESA81_1366X768P_59_79HZ,
+ AV8100_VIDEO_OUTPUT_CEA_VESA_MAX
+};
+
+enum av8100_video_sync_pol {
+ AV8100_SYNC_POSITIVE,
+ AV8100_SYNC_NEGATIVE
+};
+
+enum av8100_hdmi_mode {
+ AV8100_HDMI_OFF,
+ AV8100_HDMI_ON,
+ AV8100_HDMI_AVMUTE
+};
+
+enum av8100_hdmi_format {
+ AV8100_HDMI,
+ AV8100_DVI
+};
+
+enum av8100_DVI_format {
+ AV8100_DVI_CTRL_CTL0,
+ AV8100_DVI_CTRL_CTL1,
+ AV8100_DVI_CTRL_CTL2
+};
+
+enum av8100_pattern_type {
+ AV8100_PATTERN_OFF,
+ AV8100_PATTERN_GENERATOR,
+ AV8100_PRODUCTION_TESTING
+};
+
+enum av8100_pattern_format {
+ AV8100_NO_PATTERN,
+ AV8100_PATTERN_VGA,
+ AV8100_PATTERN_720P,
+ AV8100_PATTERN_1080P
+};
+
+enum av8100_pattern_audio {
+ AV8100_PATTERN_AUDIO_OFF,
+ AV8100_PATTERN_AUDIO_ON,
+ AV8100_PATTERN_AUDIO_I2S_MEM
+};
+
+struct av8100_video_input_format_cmd {
+ enum av8100_dsi_mode dsi_input_mode;
+ enum av8100_pixel_format input_pixel_format;
+ unsigned short total_horizontal_pixel;
+ unsigned short total_horizontal_active_pixel;
+ unsigned short total_vertical_lines;
+ unsigned short total_vertical_active_lines;
+ enum av8100_video_mode video_mode;
+ enum av8100_dsi_nb_data_lane nb_data_lane;
+ unsigned char nb_virtual_ch_command_mode;
+ unsigned char nb_virtual_ch_video_mode;
+ unsigned short TE_line_nb;
+ enum av8100_te_config TE_config;
+ unsigned long master_clock_freq;
+ unsigned char ui_x4;
+};
+
+struct av8100_audio_input_format_cmd {
+ enum av8100_audio_if_format audio_input_if_format;
+ unsigned char i2s_input_nb;
+ enum av8100_sample_freq sample_audio_freq;
+ enum av8100_audio_word_length audio_word_lg;
+ enum av8100_audio_format audio_format;
+ enum av8100_audio_if_mode audio_if_mode;
+ enum av8100_audio_mute audio_mute;
+};
+
+struct av8100_video_output_format_cmd {
+ enum av8100_output_CEA_VESA video_output_cea_vesa;
+ enum av8100_video_sync_pol vsync_polarity;
+ enum av8100_video_sync_pol hsync_polarity;
+ unsigned short total_horizontal_pixel;
+ unsigned short total_horizontal_active_pixel;
+ unsigned short total_vertical_in_half_lines;
+ unsigned short total_vertical_active_in_half_lines;
+ unsigned short hsync_start_in_pixel;
+ unsigned short hsync_length_in_pixel;
+ unsigned short vsync_start_in_half_line;
+ unsigned short vsync_length_in_half_line;
+ unsigned short hor_video_start_pixel;
+ unsigned short vert_video_start_pixel;
+ enum av8100_video_mode video_type;
+ unsigned short pixel_repeat;
+ unsigned long pixel_clock_freq_Hz;
+};
+
+struct av8100_video_scaling_format_cmd {
+ unsigned short h_start_in_pixel;
+ unsigned short h_stop_in_pixel;
+ unsigned short v_start_in_line;
+ unsigned short v_stop_in_line;
+ unsigned short h_start_out_pixel;
+ unsigned short h_stop_out_pixel;
+ unsigned short v_start_out_line;
+ unsigned short v_stop_out_line;
+};
+
+enum av8100_color_transform {
+ AV8100_COLOR_TRANSFORM_INDENTITY,
+ AV8100_COLOR_TRANSFORM_INDENTITY_CLAMP_YUV,
+ AV8100_COLOR_TRANSFORM_YUV_TO_RGB,
+ AV8100_COLOR_TRANSFORM_YUV_TO_DENC,
+ AV8100_COLOR_TRANSFORM_RGB_TO_DENC,
+};
+
+struct av8100_cec_message_write_format_cmd {
+ unsigned char buffer_length;
+ unsigned char buffer[AV8100_CEC_MESSAGE_SIZE];
+};
+
+struct av8100_cec_message_read_back_format_cmd {
+};
+
+enum av8100_cvbs_video_format {
+ AV8100_CVBS_625,
+ AV8100_CVBS_525,
+};
+
+enum av8100_standard_selection {
+ AV8100_PAL_BDGHI,
+ AV8100_PAL_N,
+ AV8100_NTSC_M,
+ AV8100_PAL_M
+};
+
+struct av8100_denc_format_cmd {
+ enum av8100_cvbs_video_format cvbs_video_format;
+ enum av8100_standard_selection standard_selection;
+ unsigned char enable;
+ unsigned char macrovision_enable;
+ unsigned char internal_generator;
+};
+
+struct av8100_hdmi_cmd {
+ enum av8100_hdmi_mode hdmi_mode;
+ enum av8100_hdmi_format hdmi_format;
+ enum av8100_DVI_format dvi_format; /* used only if HDMI_format = DVI*/
+};
+
+struct av8100_hdcp_send_key_format_cmd {
+ unsigned char key_number;
+ unsigned char data_len;
+ unsigned char data[AV8100_HDCP_SEND_KEY_SIZE];
+};
+
+enum av8100_hdcp_auth_req_type {
+ AV8100_HDCP_AUTH_REQ_OFF = 0,
+ AV8100_HDCP_AUTH_REQ_ON = 1,
+ AV8100_HDCP_REV_LIST_REQ = 2,
+ AV8100_HDCP_AUTH_CONT = 3,
+};
+
+enum av8100_hdcp_encr_use {
+ AV8100_HDCP_ENCR_USE_OESS = 0,
+ AV8100_HDCP_ENCR_USE_EESS = 1,
+};
+
+struct av8100_hdcp_management_format_cmd {
+ unsigned char req_type;
+ unsigned char encr_use;
+};
+
+struct av8100_infoframes_format_cmd {
+ unsigned char type;
+ unsigned char version;
+ unsigned char length;
+ unsigned char crc;
+ unsigned char data[AV8100_INFOFRAME_SIZE];
+};
+
+struct av8100_edid_section_readback_format_cmd {
+ unsigned char address;
+ unsigned char block_number;
+};
+
+struct av8100_pattern_generator_format_cmd {
+ enum av8100_pattern_type pattern_type;
+ enum av8100_pattern_format pattern_video_format;
+ enum av8100_pattern_audio pattern_audio_mode;
+};
+
+enum av8100_fuse_operation {
+ AV8100_FUSE_READ = 0,
+ AV8100_FUSE_WRITE = 1,
+};
+
+struct av8100_fuse_aes_key_format_cmd {
+ unsigned char fuse_operation;
+ unsigned char key[AV8100_FUSE_KEY_SIZE];
+};
+
+union av8100_configuration {
+ struct av8100_video_input_format_cmd video_input_format;
+ struct av8100_audio_input_format_cmd audio_input_format;
+ struct av8100_video_output_format_cmd video_output_format;
+ struct av8100_video_scaling_format_cmd video_scaling_format;
+ enum av8100_color_transform color_transform;
+ struct av8100_cec_message_write_format_cmd
+ cec_message_write_format;
+ struct av8100_cec_message_read_back_format_cmd
+ cec_message_read_back_format;
+ struct av8100_denc_format_cmd denc_format;
+ struct av8100_hdmi_cmd hdmi_format;
+ struct av8100_hdcp_send_key_format_cmd hdcp_send_key_format;
+ struct av8100_hdcp_management_format_cmd hdcp_management_format;
+ struct av8100_infoframes_format_cmd infoframes_format;
+ struct av8100_edid_section_readback_format_cmd
+ edid_section_readback_format;
+ struct av8100_pattern_generator_format_cmd pattern_generator_format;
+ struct av8100_fuse_aes_key_format_cmd fuse_aes_key_format;
+};
+
+enum av8100_operating_mode {
+ AV8100_OPMODE_UNDEFINED = 0,
+ AV8100_OPMODE_SHUTDOWN,
+ AV8100_OPMODE_STANDBY,
+ AV8100_OPMODE_SCAN,
+ AV8100_OPMODE_INIT,
+ AV8100_OPMODE_IDLE,
+ AV8100_OPMODE_VIDEO,
+};
+
+enum av8100_plugin_status {
+ AV8100_PLUGIN_NONE = 0x0,
+ AV8100_HDMI_PLUGIN = 0x1,
+ AV8100_CVBS_PLUGIN = 0x2,
+};
+
+enum av8100_hdmi_event {
+ AV8100_HDMI_EVENT_NONE = 0x0,
+ AV8100_HDMI_EVENT_HDMI_PLUGIN = 0x1,
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT = 0x2,
+ AV8100_HDMI_EVENT_CEC = 0x4,
+ AV8100_HDMI_EVENT_HDCP = 0x8,
+ AV8100_HDMI_EVENT_CECTXERR = 0x10,
+ AV8100_HDMI_EVENT_CECTX = 0x20, /* CEC transmitted, no error */
+};
+
+struct av8100_status {
+ enum av8100_operating_mode av8100_state;
+ enum av8100_plugin_status av8100_plugin_status;
+ int hdmi_on;
+};
+
+
+int av8100_init(void);
+void av8100_exit(void);
+int av8100_powerscan(void);
+int av8100_powerup(void);
+int av8100_powerdown(void);
+int av8100_disable_interrupt(void);
+int av8100_enable_interrupt(void);
+int av8100_download_firmware(enum interface_type if_type);
+int av8100_reg_stby_w(
+ unsigned char cpd,
+ unsigned char stby,
+ unsigned char mclkrng);
+int av8100_reg_hdmi_5_volt_time_w(
+ unsigned char denc_off_time,
+ unsigned char hdmi_off_time,
+ unsigned char on_time);
+int av8100_reg_stby_int_mask_w(
+ unsigned char hpdm,
+ unsigned char cpdm,
+ unsigned char stbygpiocfg,
+ unsigned char ipol);
+int av8100_reg_stby_pend_int_w(
+ unsigned char hpdi,
+ unsigned char cpdi,
+ unsigned char oni,
+ unsigned char bpdig);
+int av8100_reg_gen_int_mask_w(
+ unsigned char eocm,
+ unsigned char vsim,
+ unsigned char vsom,
+ unsigned char cecm,
+ unsigned char hdcpm,
+ unsigned char uovbm,
+ unsigned char tem);
+int av8100_reg_gen_int_w(
+ unsigned char eoci,
+ unsigned char vsii,
+ unsigned char vsoi,
+ unsigned char ceci,
+ unsigned char hdcpi,
+ unsigned char uovbi);
+int av8100_reg_gpio_conf_w(
+ unsigned char dat3dir,
+ unsigned char dat3val,
+ unsigned char dat2dir,
+ unsigned char dat2val,
+ unsigned char dat1dir,
+ unsigned char dat1val,
+ unsigned char ucdbg);
+int av8100_reg_gen_ctrl_w(
+ unsigned char fdl,
+ unsigned char hld,
+ unsigned char wa,
+ unsigned char ra);
+int av8100_reg_fw_dl_entry_w(
+ unsigned char mbyte_code_entry);
+int av8100_reg_w(
+ unsigned char offset,
+ unsigned char value);
+int av8100_reg_stby_r(
+ unsigned char *cpd,
+ unsigned char *stby,
+ unsigned char *hpds,
+ unsigned char *cpds,
+ unsigned char *mclkrng);
+int av8100_reg_hdmi_5_volt_time_r(
+ unsigned char *denc_off_time,
+ unsigned char *hdmi_off_time,
+ unsigned char *on_time);
+int av8100_reg_stby_int_mask_r(
+ unsigned char *hpdm,
+ unsigned char *cpdm,
+ unsigned char *stbygpiocfg,
+ unsigned char *ipol);
+int av8100_reg_stby_pend_int_r(
+ unsigned char *hpdi,
+ unsigned char *cpdi,
+ unsigned char *oni,
+ unsigned char *sid);
+int av8100_reg_gen_int_mask_r(
+ unsigned char *eocm,
+ unsigned char *vsim,
+ unsigned char *vsom,
+ unsigned char *cecm,
+ unsigned char *hdcpm,
+ unsigned char *uovbm,
+ unsigned char *tem);
+int av8100_reg_gen_int_r(
+ unsigned char *eoci,
+ unsigned char *vsii,
+ unsigned char *vsoi,
+ unsigned char *ceci,
+ unsigned char *hdcpi,
+ unsigned char *uovbi,
+ unsigned char *tei);
+int av8100_reg_gen_status_r(
+ unsigned char *cectxerr,
+ unsigned char *cecrec,
+ unsigned char *cectrx,
+ unsigned char *uc,
+ unsigned char *onuvb,
+ unsigned char *hdcps);
+int av8100_reg_gpio_conf_r(
+ unsigned char *dat3dir,
+ unsigned char *dat3val,
+ unsigned char *dat2dir,
+ unsigned char *dat2val,
+ unsigned char *dat1dir,
+ unsigned char *dat1val,
+ unsigned char *ucdbg);
+int av8100_reg_gen_ctrl_r(
+ unsigned char *fdl,
+ unsigned char *hld,
+ unsigned char *wa,
+ unsigned char *ra);
+int av8100_reg_fw_dl_entry_r(
+ unsigned char *mbyte_code_entry);
+int av8100_reg_r(
+ unsigned char offset,
+ unsigned char *value);
+int av8100_conf_get(enum av8100_command_type command_type,
+ union av8100_configuration *config);
+int av8100_conf_prep(enum av8100_command_type command_type,
+ union av8100_configuration *config);
+int av8100_conf_w(enum av8100_command_type command_type,
+ unsigned char *return_buffer_length,
+ unsigned char *return_buffer, enum interface_type if_type);
+int av8100_conf_w_raw(enum av8100_command_type command_type,
+ unsigned char buffer_length,
+ unsigned char *buffer,
+ unsigned char *return_buffer_length,
+ unsigned char *return_buffer);
+struct av8100_status av8100_status_get(void);
+enum av8100_output_CEA_VESA av8100_video_output_format_get(int xres,
+ int yres,
+ int htot,
+ int vtot,
+ int pixelclk,
+ bool interlaced);
+void av8100_hdmi_event_cb_set(void (*event_callback)(enum av8100_hdmi_event));
+u8 av8100_ver_get(void);
+
+#endif /* __AV8100__H__ */
diff --git a/include/video/b2r2_blt.h b/include/video/b2r2_blt.h
new file mode 100644
index 00000000000..a992155f0da
--- /dev/null
+++ b/include/video/b2r2_blt.h
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 user interface
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef _LINUX_VIDEO_B2R2_BLT_H
+#define _LINUX_VIDEO_B2R2_BLT_H
+
+#include <linux/types.h>
+
+#if defined(__KERNEL__) || defined(_KERNEL)
+#include <linux/mm_types.h>
+#include <linux/bitops.h>
+#else
+#define BIT(nr) (1UL << (nr))
+#endif
+
+/**
+ * struct b2r2_blt_rect - Specifies a B2R2 rectangle
+ *
+ * @x: X-coordinate of top left corner
+ * @y: Y-coordinate of top left corner
+ * @width: Rectangle width. Must be >= 0.
+ * @height: Rectangle height. Must be >= 0.
+ */
+struct b2r2_blt_rect {
+ __s32 x;
+ __s32 y;
+ __s32 width;
+ __s32 height;
+};
+
+/**
+ * enum b2r2_blt_fmt - Defines the available B2R2 buffer formats
+ *
+ * Inspired by Khronos OpenMAX, please see
+ * OpenMAX IL specification for detailed descriptions of the formats
+ *
+ * @B2R2_BLT_FMT_UNUSED: Placeholder value when format is unknown,
+ * or specified using a vendor-specific means.
+ * @B2R2_BLT_FMT_16_BIT_ARGB4444: 16 bits per pixel ARGB format with colors
+ * stored as Alpha 15:12, Red 11:8, Green 7:4, and Blue 3:0.
+ * @B2R2_BLT_FMT_16_BIT_ARGB1555: 16 bits per pixel ARGB format with colors
+ * stored as Alpha 15, Red 14:10, Green 9:5, and Blue 4:0.
+ * @B2R2_BLT_FMT_16_BIT_RGB565: 16 bits per pixel RGB format with colors
+ * stored as Red 15:11, Green 10:5, and Blue 4:0.
+ * @B2R2_BLT_FMT_24_BIT_RGB888: 24 bits per pixel RGB format with colors
+ * stored as Red 23:16, Green 15:8, and Blue 7:0.
+ * @B2R2_BLT_FMT_32_BIT_ARGB8888: 32 bits per pixel ARGB format with colors
+ * stored as Alpha 31:24, Red 23:16, Green 15:8, and Blue 7:0.
+ * @B2R2_BLT_FMT_YUV420_PACKED_PLANAR: YUV planar format, organized with
+ * three separate planes for each color component, namely Y, U, and V.
+ * U and V pixels are sub-sampled by a factor of two both horizontally and
+ * vertically. The buffer shall contain a plane of Y, U, and V data in this
+ * order
+ * @B2R2_BLT_FMT_YUV422_PACKED_PLANAR: YUV planar format, organized with
+ * three separate planes for each color component, namely Y, U, and V.
+ * U and V pixels are subsampled by a factor of two horizontally.
+ * The buffer shall contain a plane of Y, U, and V data in this order.
+ * @B2R2_BLT_FMT_Y_CB_Y_CR: 16 bits per pixel YUV interleaved format organized
+ * as YUYV (i.e., YCbYCr).
+ * (Corresponds to YUV422 interleaved)
+ * @B2R2_BLT_FMT_CB_Y_CR_Y: 16 bits per pixel YUV interleaved format organized
+ * as UYVY (i.e., CbYCrY).
+ * (Corresponds to YUV422R)
+ * @B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: YUV planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing U and V
+ * pixels interleaved with the first U value first. U and V pixels are
+ * sub-sampled by a factor of two both horizontally and vertically. The buffer
+ * shall contain a plane of Y, U and V data.
+ * (Same as B2R2 420 Raster 2 buffer - 420 R2B)
+ * @B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: YUV planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing U and V
+ * pixels interleaved with the first U value first. U and V pixels are
+ * sub-sampled by a factor of two horizontally. The buffer shall contain a
+ * plane of Y, U and V data.
+ * (Same as B2R2 422 Raster 2 buffer - 422 R2B)
+ * @B2R2_BLT_FMT_32_BIT_ABGR8888: 32 bits per pixel ABGR format with colors
+ * stored as Alpha 31:24,Blue 23:16, Green 15:8, and Red 7:0.
+ * @B2R2_BLT_FMT_24_BIT_ARGB8565: 24 bits per pixel ARGB format with colors
+ * stored as Alpha 23:16, Red 15:11, Green 10:5, and Blue 4:0.
+ * @B2R2_BLT_FMT_24_BIT_YUV888: 24 bits per pixel YUV format with colors
+ * stored as Y 23:16, U 15:8, and V 7:0.
+ * @B2R2_BLT_FMT_32_BIT_AYUV8888: 32 bits per pixel AYUV format with colors
+ * stored as Alpha 31:24, Y 23:16, U 15:8, and V 7:0.
+ * @B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: Nomadik YUV 420 macro block
+ * format, see B2R2 spec for details
+ * @B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: Nomadik YUV 422 macro block
+ * format, see B2R2 spec for details
+ * @B2R2_BLT_FMT_1_BIT_A1: 1 bit per pixel A format, 1 bit alpha
+ * @B2R2_BLT_FMT_8_BIT_A8: 8 bit per pixel A format, 8 bit alpha
+ * @B2R2_BLT_FMT_YUV444_PACKED_PLANAR: YUV planar format, organized with
+ * three separate planes, one for each color component, namely Y, U, and V.
+ * All planes use full resolution, there is no subsampling.
+ * The buffer shall contain a plane of Y, U, and V data in this order.
+ * @B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: YVU planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing V and U
+ * pixels interleaved, with the V value first. V and U pixels are
+ * sub-sampled by a factor of two both horizontally and vertically. The buffer
+ * shall contain two planes, one plane with Y, and one with V and U data.
+ * (Same as B2R2 420 Raster 2 buffer - 420 R2B except that chroma order is
+ * swapped.)
+ * @B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: YVU planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing V and U
+ * pixels interleaved, with the V value first. V and U pixels are
+ * sub-sampled by a factor of two horizontally. The buffer shall contain
+ * two planes: one with Y data and one with V and U data.
+ * (Same as B2R2 422 Raster 2 buffer - 422 R2B except that chroma order is
+ * swapped.)
+ * @B2R2_BLT_FMT_YVU420_PACKED_PLANAR: YVU planar format, organized with
+ * three separate planes for each color component, namely Y, V, and U.
+ * V and U pixels are sub-sampled by a factor of two both horizontally and
+ * vertically. The buffer shall contain a plane of Y, V, and U data in this
+ * order. (Same as B2R2_BLT_FMT_YUV420_PACKED_PLANAR except that chroma
+ * order is swapped.)
+ * @B2R2_BLT_FMT_YVU422_PACKED_PLANAR: YVU planar format, organized with
+ * three separate planes for each color component, namely Y, V, and U.
+ * V and U pixels are subsampled by a factor of two horizontally.
+ * The buffer shall contain a plane of Y, V, and U data in this order.
+ * (Same as B2R2_BLT_FMT_YUV422_PACKED_PLANAR except that chroma
+ * order is swapped.)
+ * @B2R2_BLT_FMT_24_BIT_VUY888: 24 bits per pixel VUY format with colors
+ * stored as V 23:16, U 15:8, and Y 7:0.
+ * @B2R2_BLT_FMT_32_BIT_VUYA8888: 32 bits per pixel VUYA format with colors
+ * stored as V 31:24, U 23:16, Y 15:8, and Alpha 7:0.
+ */
+enum b2r2_blt_fmt {
+ B2R2_BLT_FMT_UNUSED = 0,
+ B2R2_BLT_FMT_16_BIT_ARGB4444 = 4,
+ B2R2_BLT_FMT_16_BIT_ARGB1555 = 5,
+ B2R2_BLT_FMT_16_BIT_RGB565 = 6,
+ B2R2_BLT_FMT_24_BIT_RGB888 = 11,
+ B2R2_BLT_FMT_32_BIT_ARGB8888 = 16,
+ B2R2_BLT_FMT_YUV420_PACKED_PLANAR = 20,
+ B2R2_BLT_FMT_YUV422_PACKED_PLANAR = 23,
+ B2R2_BLT_FMT_Y_CB_Y_CR = 25,
+ B2R2_BLT_FMT_CB_Y_CR_Y = 27,
+ B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR = 39,
+ B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR = 40,
+ /* Extensions, non OpenMAX formats */
+ B2R2_BLT_FMT_32_BIT_ABGR8888 = 0x7F000000, /* OpenMax vendor start */
+ B2R2_BLT_FMT_24_BIT_ARGB8565 = 0x7F000001,
+ B2R2_BLT_FMT_24_BIT_YUV888 = 0x7F000002,
+ B2R2_BLT_FMT_32_BIT_AYUV8888 = 0x7F000003,
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE = 0x7F000004,
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE = 0x7F000005,
+ B2R2_BLT_FMT_1_BIT_A1 = 0x7F000006,
+ B2R2_BLT_FMT_8_BIT_A8 = 0x7F000007,
+ B2R2_BLT_FMT_YUV444_PACKED_PLANAR = 0x7F000008,
+ B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR = 0x7F000009,
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR = 0x7F00000A,
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR = 0x7F00000B,
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR = 0x7F00000C,
+ B2R2_BLT_FMT_24_BIT_VUY888 = 0x7F00000D,
+ B2R2_BLT_FMT_32_BIT_VUYA8888 = 0x7F00000E,
+};
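+
+/*
+ * As an illustration of the packed formats above (a sketch, not part of the
+ * API): B2R2_BLT_FMT_16_BIT_ARGB1555 stores A in bit 15, R in bits 14:10,
+ * G in bits 9:5 and B in bits 4:0, so the channels of a 16-bit pixel value
+ * can be extracted as:
+ *
+ *	a = (pix >> 15) & 0x01;
+ *	r = (pix >> 10) & 0x1f;
+ *	g = (pix >> 5) & 0x1f;
+ *	b = pix & 0x1f;
+ */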
+
+/**
+ * enum b2r2_blt_ptr_type - Specifies a B2R2 buffer pointer type
+ *
+ * @B2R2_BLT_PTR_NONE:
+ * No pointer (NULL). E.g. src fill.
+ * @B2R2_BLT_PTR_VIRTUAL:
+ * Use offset as a userspace virtual address
+ * @B2R2_BLT_PTR_PHYSICAL:
+ * Use offset as a physical address
+ * @B2R2_BLT_PTR_FD_OFFSET:
+ * Use fd + offset to determine buffer location.
+ * @B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
+ * Use hwmem_buf_name and offset to determine buffer location.
+ */
+enum b2r2_blt_ptr_type {
+ B2R2_BLT_PTR_NONE,
+ B2R2_BLT_PTR_VIRTUAL,
+ B2R2_BLT_PTR_PHYSICAL,
+ B2R2_BLT_PTR_FD_OFFSET,
+ B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET,
+};
+
+/**
+ * struct b2r2_blt_buf - Specifies a B2R2 buffer pointer
+ *
+ * @type: Buffer pointer type
+ * @hwmem_buf_name: Hwmem buffer name
+ * @fd: File descriptor (e.g. file handle to pmem or fb device)
+ * @offset: Offset at which the buffer can be found, or the address itself,
+ * depending on @type.
+ * @len: Size of buffer in bytes
+ * @bits: Pointer to the bitmap data. This field can be used to specify
+ * an alternative way to access the buffer. Whenever the 'bits' pointer
+ * is set to non-NULL, the underlying implementation is free to decide
+ * whether or not to use it in favor of other ways to locate the buffer.
+ */
+struct b2r2_blt_buf {
+ enum b2r2_blt_ptr_type type;
+ __s32 hwmem_buf_name;
+ __s32 fd;
+ __u32 offset;
+ __u32 len;
+ void *bits;
+};
+
+
+/**
+ * struct b2r2_blt_img - Specifies a B2R2 image
+ *
+ * @fmt: Pixel format of image
+ * @buf: Pixel buffer
+ * @width: Width in pixels
+ * @height: Height in pixels
+ * @pitch: Pitch in bytes (from start of one line to start of next)
+ */
+struct b2r2_blt_img {
+ enum b2r2_blt_fmt fmt;
+ struct b2r2_blt_buf buf;
+ __s32 width;
+ __s32 height;
+ __u32 pitch;
+};
+
+
+/**
+ * enum b2r2_blt_transform - Specifies rotation and flipping, mutually exclusive
+ * @B2R2_BLT_TRANSFORM_NONE:
+ * No rotation or flip
+ * @B2R2_BLT_TRANSFORM_FLIP_H:
+ * Flip horizontally
+ * @B2R2_BLT_TRANSFORM_FLIP_V:
+ * Flip vertically
+ * @B2R2_BLT_TRANSFORM_CCW_ROT_90:
+ * Rotate 90 degrees counter clockwise
+ * @B2R2_BLT_TRANSFORM_CCW_ROT_180:
+ * Rotate 180 degrees (same as flip horizontally together with
+ * flip vertically)
+ * @B2R2_BLT_TRANSFORM_CCW_ROT_270:
+ * Rotate 270 degrees counter clockwise
+ * @B2R2_BLT_TRANSFORM_FLIP_H_CCW_ROT_90:
+ * Flip horizontally and then rotate 90 degrees counter clockwise
+ * @B2R2_BLT_TRANSFORM_FLIP_V_CCW_ROT_90:
+ * Flip vertically and then rotate 90 degrees counter clockwise
+ */
+enum b2r2_blt_transform {
+ B2R2_BLT_TRANSFORM_NONE = 0,
+ B2R2_BLT_TRANSFORM_FLIP_H = 1,
+ B2R2_BLT_TRANSFORM_FLIP_V = 2,
+ B2R2_BLT_TRANSFORM_CCW_ROT_90 = 4,
+ B2R2_BLT_TRANSFORM_CCW_ROT_180 = 3,
+ B2R2_BLT_TRANSFORM_CCW_ROT_270 = 7,
+ B2R2_BLT_TRANSFORM_FLIP_H_CCW_ROT_90 = 5,
+ B2R2_BLT_TRANSFORM_FLIP_V_CCW_ROT_90 = 6,
+};
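+
+/*
+ * Note, derived from the values above rather than from the B2R2 spec: the
+ * transform values appear to be encoded as bit 0 = horizontal flip,
+ * bit 1 = vertical flip and bit 2 = 90 degree counterclockwise rotation,
+ * with the flips applied before the rotation. For example
+ * B2R2_BLT_TRANSFORM_CCW_ROT_180 (3) equals FLIP_H | FLIP_V and
+ * B2R2_BLT_TRANSFORM_CCW_ROT_270 (7) equals FLIP_H | FLIP_V | CCW_ROT_90.
+ */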
+
+
+/**
+ * enum b2r2_blt_flag - Flags that control the B2R2 request
+ *
+ * Can be combined.
+ *
+ * @B2R2_BLT_FLAG_ASYNCH:
+ * Asynchronous request. b2r2_blt returns as soon as the request
+ * has been queued.
+ * @B2R2_BLT_FLAG_DRY_RUN:
+ * Dry run, just to check if request can be performed.
+ * @B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND:
+ * Enable per pixel alpha blend
+ * @B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND:
+ * Enable global alpha blend (alpha value in global_alpha)
+ * @B2R2_BLT_FLAG_SOURCE_COLOR_KEY:
+ * Enable source color key (color in src_color). Color should be in raw
+ * format.
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY, B2R2_BLT_FLAG_SOURCE_FILL and
+ * B2R2_BLT_FLAG_SOURCE_FILL_RAW cannot be specified at the same time.
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY and B2R2_BLT_FLAG_DEST_COLOR_KEY cannot be
+ * specified at the same time.
+ * @B2R2_BLT_FLAG_SOURCE_FILL:
+ * Enable ARGB/AYUV source fill (color in src_color). Which of ARGB and AYUV
+ * is determined by the destination format.
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY, B2R2_BLT_FLAG_SOURCE_FILL and
+ * B2R2_BLT_FLAG_SOURCE_FILL_RAW cannot be specified at the same time
+ * @B2R2_BLT_FLAG_SOURCE_FILL_RAW:
+ * Enable raw color source fill (color in src_color)
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY, B2R2_BLT_FLAG_SOURCE_FILL and
+ * B2R2_BLT_FLAG_SOURCE_FILL_RAW cannot be specified at the same time
+ * @B2R2_BLT_FLAG_DEST_COLOR_KEY:
+ * Enable dest color key (color in dst_color). Color in raw format.
+ * @B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT:
+ * Source color not premultiplied (Valid for alpha formats only).
+ * @B2R2_BLT_FLAG_DITHER:
+ * Enable dithering
+ * @B2R2_BLT_FLAG_BLUR:
+ * Enable blur
+ * @B2R2_BLT_FLAG_SOURCE_MASK:
+ * Enable source mask
+ * @B2R2_BLT_FLAG_DESTINATION_CLIP:
+ * Enable destination clip rectangle
+ * @B2R2_BLT_FLAG_INHERIT_PRIO:
+ * Inherit process priority
+ * @B2R2_BLT_FLAG_REPORT_WHEN_DONE:
+ * Report through the b2r2_blt file when done. A b2r2_blt_report structure is
+ * read. Use poll() or select() to find out when there is something to read
+ * (i.e. to help user space implement callback functionality).
+ * @B2R2_BLT_FLAG_REPORT_PERFORMANCE:
+ * Include performance data in the report structure
+ * @B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION:
+ * Use color look-up table for color correction.
+ * Pointer to the table must be specified in *clut field of
+ * the b2r2_blt_req structure.
+ * The table must map all input color values
+ * for each channel to the desired output values.
+ * It is an array with the following format:
+ * R0 G0 B0 A0 R1 G1 B1 A1...R255 G255 B255 A255
+ * where R0 is the 8 bit output value for red channel whenever its input
+ * equals 0.
+ * Similarly, R1 through R255 are the red channel outputs whenever
+ * the channel's inputs equal 1 through 255 respectively.
+ * Gn, Bn, An denote green, blue and alpha channel.
+ * Whenever the input bitmap format lacks the alpha channel,
+ * all alpha values in the color correction table should be set to 255.
+ * Size of the array that specifies the color correction table
+ * must be 1024 bytes.
+ * A table that does not change anything (the identity table; a sketch of
+ * building one follows the enum below) has the form:
+ * 0 0 0 0 1 1 1 1 2 2 2 2 ... 254 254 254 254 255 255 255 255.
+ * CLUT color correction can be applied to YUV raster buffers as well,
+ * in which case the RGB color channels are mapped onto YUV-space
+ * as follows:
+ * R = red chrominance
+ * G = luminance
+ * B = blue chrominance
+ * A = alpha
+ * If any of the planar or semi-planar formats is used, luminance cannot
+ * be changed by the color correction table.
+ */
+enum b2r2_blt_flag {
+ B2R2_BLT_FLAG_ASYNCH = BIT(0),/*0x1*/
+ B2R2_BLT_FLAG_DRY_RUN = BIT(1),/*0x2*/
+ B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND = BIT(2),/*0x4*/
+ B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND = BIT(3),/*0x8*/
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY = BIT(4),/*0x10*/
+ B2R2_BLT_FLAG_SOURCE_FILL = BIT(5),/*0x20*/
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW = BIT(6),/*0x40*/
+ B2R2_BLT_FLAG_DEST_COLOR_KEY = BIT(7),/*0x80*/
+ B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT = BIT(8),/*0x100*/
+ B2R2_BLT_FLAG_DITHER = BIT(9),/*0x200*/
+ B2R2_BLT_FLAG_BLUR = BIT(10),/*0x400*/
+ B2R2_BLT_FLAG_SOURCE_MASK = BIT(11),/*0x800*/
+ B2R2_BLT_FLAG_DESTINATION_CLIP = BIT(12),/*0x1000*/
+ B2R2_BLT_FLAG_INHERIT_PRIO = BIT(13),/*0x2000*/
+ B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH = BIT(14),/*0x4000*/
+ B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH = BIT(15),/*0x8000*/
+ B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH = BIT(16),/*0x10000*/
+ B2R2_BLT_FLAG_REPORT_WHEN_DONE = BIT(29),/*0x20000000*/
+ B2R2_BLT_FLAG_REPORT_PERFORMANCE = BIT(30),/*0x40000000*/
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION = BIT(31),/*0x80000000*/
+};
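+
+/*
+ * A minimal sketch (the helper name is made up, it is not part of the API)
+ * of building the 1024 byte identity color correction table described for
+ * B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION above; the clut field of
+ * struct b2r2_blt_req is pointed at such a table when the flag is set:
+ *
+ *	void build_identity_clut(__u8 clut[1024])
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < 256; i++) {
+ *			clut[4 * i + 0] = i;
+ *			clut[4 * i + 1] = i;
+ *			clut[4 * i + 2] = i;
+ *			clut[4 * i + 3] = i;
+ *		}
+ *	}
+ *
+ * For input formats without an alpha channel the documentation above says
+ * that the alpha entries (every fourth byte) should be set to 255 instead.
+ */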
+
+
+/**
+ * struct b2r2_blt_req - Specifies a request to B2R2
+ *
+ * @size: Size of this structure. Used for versioning. MUST be specified.
+ * @flags: Flags that control the B2R2 request ORed together
+ * @transform: How the source should be flipped and rotated when blitting
+ * @prio: Priority (-20 to 19). Inherits process prio
+ * if B2R2_BLT_FLAG_INHERIT_PRIO. Given priority is mapped onto B2R2.
+ * TBD: How?
+ * @clut: Pointer to the look-up table for color correction.
+ * @src_img: Source image. Not used if source fill.
+ * @src_mask: Source mask. Not used if source fill.
+ * @src_rect: Source area to be blitted.
+ * @src_color: Source fill color or color key
+ * @dst_img: Destination image.
+ * @dst_rect: Destination area to be blitted to.
+ * @dst_color: Destination color key
+ * @dst_clip_rect: Destination clip rectangle.
+ * @global_alpha: Global alpha value (0 - 255)
+ * @report1: Data 1 to report back when request is done.
+ * See struct b2r2_blt_report.
+ * @report2: Data 2 to report back when request is done.
+ * See struct b2r2_blt_report.
+ *
+ */
+struct b2r2_blt_req {
+ __u32 size;
+ enum b2r2_blt_flag flags;
+ enum b2r2_blt_transform transform;
+ __s32 prio;
+ void *clut;
+ struct b2r2_blt_img src_img;
+ struct b2r2_blt_img src_mask;
+ struct b2r2_blt_rect src_rect;
+ __u32 src_color;
+ struct b2r2_blt_img dst_img;
+ struct b2r2_blt_rect dst_rect;
+ struct b2r2_blt_rect dst_clip_rect;
+ __u32 dst_color;
+ __u8 global_alpha;
+ __u32 report1;
+ __u32 report2;
+};
+
+/**
+ * enum b2r2_blt_cap - Capabilities that can be queried for.
+ *
+ * Capabilities can be queried for a specific format or for formats in
+ * general. To query for capabilities in general, specify B2R2_BLT_FMT_UNUSED
+ * as the format.
+ *
+ * @B2R2_BLT_CAP_UNUSED: Unused/unspecified capability
+ * @B2R2_BLT_CAP_FMT_SOURCE: Is format supported as source?
+ * @B2R2_BLT_CAP_FMT_SOURCE_MASK: Is format supported as source mask?
+ * @B2R2_BLT_CAP_FMT_DEST: Is format supported as dest?
+ * @B2R2_BLT_CAP_PER_PIXEL_ALPHA_BLEND: Is per pixel alpha blending supported
+ * with format as source
+ * @B2R2_BLT_CAP_GLOBAL_ALPHA_BLEND: Is global alpha blending supported
+ * with format as source
+ * @B2R2_BLT_CAP_SOURCE_COLOR_KEY: Is source color key supported with format as
+ * source
+ * @B2R2_BLT_CAP_SOURCE_FILL: Is source fill supported with format as source
+ * @B2R2_BLT_CAP_SOURCE_FILL_RAW: Is source fill raw supported with format as
+ * dest
+ * @B2R2_BLT_CAP_DEST_COLOR_KEY: Is dest color key supported with format as dest
+ * @B2R2_BLT_CAP_DITHER: Is dithering supported with format as source
+ * @B2R2_BLT_CAP_BLUR: Is blur supported with format as source
+ * @B2R2_BLT_CAP_MINIFICATION_LIMIT: Minification limit (copybit support)
+ * @B2R2_BLT_CAP_MAGNIFICATION_LIMIT: Magnification limit (copybit support)
+ * @B2R2_BLT_CAP_SCALING_FRAC_BITS: Number of scaling fractional bits (copybit
+ * support)
+ * @B2R2_BLT_CAP_ROTATION_STEP_DEG: Supported rotation step in degrees (copybit
+ * support)
+ */
+
+enum b2r2_blt_cap {
+ B2R2_BLT_CAP_UNUSED = 0,
+ /**
+ * @brief Is format supported as source.
+ */
+ B2R2_BLT_CAP_FMT_SOURCE,
+ /**
+ * @brief Is format supported as source mask
+ */
+ B2R2_BLT_CAP_FMT_SOURCE_MASK,
+ /**
+ * @brief Is format supported as destination
+ */
+ B2R2_BLT_CAP_FMT_DEST,
+ /**
+ * @brief Is per pixel alpha blending supported with format as source
+ */
+ B2R2_BLT_CAP_PER_PIXEL_ALPHA_BLEND,
+ /**
+ * @brief Is global alpha blending supported with format as source
+ */
+ B2R2_BLT_CAP_GLOBAL_ALPHA_BLEND,
+ /**
+ * @brief Is source color key supported with format as source
+ */
+ B2R2_BLT_CAP_SOURCE_COLOR_KEY,
+ /**
+ * @brief Is source fill supported with format as source
+ */
+ B2R2_BLT_CAP_SOURCE_FILL,
+ /**
+ * @brief Is source fill raw supported with format as dest
+ */
+ B2R2_BLT_CAP_SOURCE_FILL_RAW,
+ /**
+ * @brief Is dest color key supported with format as dest
+ */
+ B2R2_BLT_CAP_DEST_COLOR_KEY,
+ /**
+ * @brief Is dithering supported with format as source
+ */
+ B2R2_BLT_CAP_DITHER,
+ /**
+ * @brief Is blur supported with format as source
+ */
+ B2R2_BLT_CAP_BLUR,
+ /**
+ * @brief Minification limit (copybit support)
+ */
+ B2R2_BLT_CAP_MINIFICATION_LIMIT,
+ /**
+ * @brief Magnification limit (copybit support)
+ */
+ B2R2_BLT_CAP_MAGNIFICATION_LIMIT,
+ /**
+ * @brief Number of scaling fractional bits (copybit support)
+ */
+ B2R2_BLT_CAP_SCALING_FRAC_BITS,
+ /**
+ * @brief Supported rotation step in degrees (copybit support)
+ */
+ B2R2_BLT_CAP_ROTATION_STEP_DEG,
+};
+
+/**
+ * struct b2r2_blt_query_cap - Query B2R2 capabilities
+ *
+ * @fmt: Format to query capabilities for, or B2R2_BLT_FMT_UNUSED for all
+ * @cap: Capability to query for
+ * @result: Returned capability. Interpretation of this variable varies
+ * with the capability queried
+ */
+struct b2r2_blt_query_cap {
+ enum b2r2_blt_fmt fmt;
+ enum b2r2_blt_cap cap;
+ __u32 result;
+};
+
+/**
+ * struct b2r2_blt_report - Report from B2R2 driver back to user space
+ *
+ * This structure can be read from B2R2 driver if B2R2_BLT_FLAG_REPORT_WHEN_DONE
+ * flag was specified when the request was issued.
+ *
+ * @request_id: The id of the request, same as returned by the B2R2_BLT_IOC
+ * ioctl
+ * @report1: Client data specified in struct b2r2_blt_req
+ * @report2: Client data specified in struct b2r2_blt_req
+ * @usec_elapsed: Number of microseconds needed to perform this blit
+ * if B2R2_BLT_FLAG_REPORT_PERFORMANCE was specified when the
+ * request was issued.
+ *
+ */
+struct b2r2_blt_report {
+ __u32 request_id;
+ __u32 report1;
+ __u32 report2;
+ __u32 usec_elapsed;
+};
+
+/**
+ * B2R2 BLT driver is used in the following way:
+ *
+ * Obtain a file descriptor to the driver:
+ * fd = open("/dev/b2r2_blt", O_RDWR);
+ *
+ * Issue requests:
+ * struct b2r2_blt_req blt_request;
+ * blt_request.size = sizeof(blt_request);
+ * ... Fill request with data...
+ *
+ * request_id = ioctl(fd, B2R2_BLT_IOC, (__u32) &blt_request);
+ *
+ * Wait for a request to finish:
+ * ret = ioctl(fd, B2R2_BLT_SYNCH_IOC, (__u32) request_id);
+ *
+ * Wait for all requests from this context to finish:
+ * ret = ioctl(fd, B2R2_BLT_SYNCH_IOC, (__u32) 0);
+ *
+ * Wait indefinitely for report data from the driver:
+ * struct pollfd pollfd;
+ * pollfd.fd = fd;
+ * pollfd.events = 0xFFFFFFFF;
+ * pollfd.revents = 0;
+ * ret = poll(&pollfd, 1, -1);
+ *
+ * Read report data from the driver:
+ * struct b2r2_blt_report blt_report;
+ *
+ * nread = read(fd, &blt_report, sizeof(blt_report));
+ *
+ * Close the driver:
+ * close(fd);
+ */
+
+/* B2R2 BLT IOCTLS */
+
+/**
+ * B2R2_BLT_IOC_MAGIC is ioctl type group for B2R2 driver
+ */
+#define B2R2_BLT_IOC_MAGIC 0xb2
+
+/**
+ * The B2R2_BLT_IOC ioctl adds a blit request to B2R2.
+ *
+ * The ioctl returns when the blit has been performed, unless asynchronous
+ * execution has been specified. If asynchronous, control is returned as
+ * soon as the request has been queued.
+ *
+ * The supplied parameter shall be a pointer to a struct b2r2_blt_req.
+ *
+ * Returns a unique request id if >= 0, else a negative error code.
+ * This request id can be waited for using B2R2_BLT_SYNCH_IOC.
+ * Return values: -ESOMERROR Description of an error
+ */
+#define B2R2_BLT_IOC _IOW(B2R2_BLT_IOC_MAGIC, 1, struct b2r2_blt_req)
+
+/**
+ * The B2R2_BLT_SYNCH_IOC ioctl waits for a specified request, or for all
+ * requests, to finish.
+ *
+ * Supplied parameter shall be a request id previously returned by
+ * B2R2_BLT_IOC or 0 for all requests.
+ *
+ * Returns 0 if OK, else a negative error code
+ * Return value: -ESOMERROR Description of an error
+ */
+#define B2R2_BLT_SYNCH_IOC _IOW(B2R2_BLT_IOC_MAGIC, 2, int)
+
+/**
+ * The B2R2_BLT_QUERY_CAP_IOC ioctl returns capability information for all
+ * formats or for a certain format.
+ *
+ * Supplied parameter shall be a pointer to a struct b2r2_blt_query_cap.
+ *
+ * @return Returns 0 if OK, else a negative error code
+ * @retval -ESOMERROR Description of an error
+ */
+#define B2R2_BLT_QUERY_CAP_IOC _IOWR(B2R2_BLT_IOC_MAGIC, 3, \
+ struct b2r2_blt_query_cap)
+
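+/*
+ * A short user space sketch (illustrative only) of querying a capability
+ * with B2R2_BLT_QUERY_CAP_IOC, complementing the usage outline above:
+ *
+ *	struct b2r2_blt_query_cap query;
+ *
+ *	memset(&query, 0, sizeof(query));
+ *	query.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ *	query.cap = B2R2_BLT_CAP_FMT_SOURCE;
+ *	ret = ioctl(fd, B2R2_BLT_QUERY_CAP_IOC, &query);
+ *
+ * On success, query.result holds the answer; for the FMT_* capabilities it
+ * is presumably non-zero when the format is supported (the exact encoding
+ * is capability specific, see struct b2r2_blt_query_cap).
+ */
+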
+#endif /* #ifdef _LINUX_VIDEO_B2R2_BLT_H */
diff --git a/include/video/hdmi.h b/include/video/hdmi.h
new file mode 100644
index 00000000000..55dcd003fcd
--- /dev/null
+++ b/include/video/hdmi.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * HDMI driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __HDMI__H__
+#define __HDMI__H__
+
+#define HDMI_RESULT_OK 0
+#define HDMI_RESULT_NOT_OK 1
+#define HDMI_AES_NOT_FUSED 2
+#define HDMI_RESULT_CRC_MISMATCH 3
+
+#define HDMI_CEC_READ_MAXSIZE 16
+#define HDMI_CEC_WRITE_MAXSIZE 15
+#define HDMI_INFOFRAME_MAX_SIZE 27
+#define HDMI_HDCP_FUSEAES_KEYSIZE 16
+#define HDMI_HDCP_AES_BLOCK_START 128
+#define HDMI_HDCP_KSV_BLOCK 40
+#define HDMI_HDCP_AES_NR_OF_BLOCKS 18
+#define HDMI_HDCP_AES_KEYSIZE 16
+#define HDMI_HDCP_AES_KSVSIZE 5
+#define HDMI_HDCP_AES_KSVZEROESSIZE 3
+#define HDMI_EDID_DATA_SIZE 128
+#define HDMI_CEC_SIZE 15
+#define HDMI_INFOFR_SIZE 27
+#define HDMI_FUSE_KEYSIZE 16
+#define HDMI_AES_KSVSIZE 5
+#define HDMI_AES_KEYSIZE 288
+#define HDMI_CRC32_SIZE 4
+#define HDMI_HDCPAUTHRESP_SIZE 126
+
+#define HDMI_STOREASTEXT_TEXT_SIZE 2
+#define HDMI_STOREASTEXT_BIN_SIZE 1
+#define HDMI_PLUGDETEN_TEXT_SIZE 6
+#define HDMI_PLUGDETEN_BIN_SIZE 3
+#define HDMI_EDIDREAD_TEXT_SIZE 4
+#define HDMI_EDIDREAD_BIN_SIZE 2
+#define HDMI_CECEVEN_TEXT_SIZE 2
+#define HDMI_CECEVEN_BIN_SIZE 1
+#define HDMI_CECSEND_TEXT_SIZE_MAX 37
+#define HDMI_CECSEND_TEXT_SIZE_MIN 6
+#define HDMI_CECSEND_BIN_SIZE_MAX 18
+#define HDMI_CECSEND_BIN_SIZE_MIN 3
+#define HDMI_INFOFRSEND_TEXT_SIZE_MIN 8
+#define HDMI_INFOFRSEND_TEXT_SIZE_MAX 63
+#define HDMI_INFOFRSEND_BIN_SIZE_MIN 4
+#define HDMI_INFOFRSEND_BIN_SIZE_MAX 31
+#define HDMI_HDCPEVEN_TEXT_SIZE 2
+#define HDMI_HDCPEVEN_BIN_SIZE 1
+#define HDMI_HDCP_FUSEAES_TEXT_SIZE 34
+#define HDMI_HDCP_FUSEAES_BIN_SIZE 17
+#define HDMI_HDCP_LOADAES_TEXT_SIZE 594
+#define HDMI_HDCP_LOADAES_BIN_SIZE 297
+#define HDMI_HDCPAUTHENCR_TEXT_SIZE 4
+#define HDMI_HDCPAUTHENCR_BIN_SIZE 2
+#define HDMI_EVCLR_TEXT_SIZE 2
+#define HDMI_EVCLR_BIN_SIZE 1
+#define HDMI_AUDIOCFG_TEXT_SIZE 14
+#define HDMI_AUDIOCFG_BIN_SIZE 7
+#define HDMI_POWERONOFF_TEXT_SIZE 2
+#define HDMI_POWERONOFF_BIN_SIZE 1
+
+#define HDMI_IOC_MAGIC 0xcc
+
+/** IOCTL Operations */
+#define IOC_PLUG_DETECT_ENABLE _IOWR(HDMI_IOC_MAGIC, 1, int)
+#define IOC_EDID_READ _IOWR(HDMI_IOC_MAGIC, 2, int)
+#define IOC_CEC_EVENT_ENABLE _IOWR(HDMI_IOC_MAGIC, 3, int)
+#define IOC_CEC_READ _IOWR(HDMI_IOC_MAGIC, 4, int)
+#define IOC_CEC_SEND _IOWR(HDMI_IOC_MAGIC, 5, int)
+#define IOC_INFOFRAME_SEND _IOWR(HDMI_IOC_MAGIC, 6, int)
+#define IOC_HDCP_EVENT_ENABLE _IOWR(HDMI_IOC_MAGIC, 7, int)
+#define IOC_HDCP_CHKAESOTP _IOWR(HDMI_IOC_MAGIC, 8, int)
+#define IOC_HDCP_FUSEAES _IOWR(HDMI_IOC_MAGIC, 9, int)
+#define IOC_HDCP_LOADAES _IOWR(HDMI_IOC_MAGIC, 10, int)
+#define IOC_HDCP_AUTHENCR_REQ _IOWR(HDMI_IOC_MAGIC, 11, int)
+#define IOC_HDCP_STATE_GET _IOWR(HDMI_IOC_MAGIC, 12, int)
+#define IOC_EVENTS_READ _IOWR(HDMI_IOC_MAGIC, 13, int)
+#define IOC_EVENTS_CLEAR _IOWR(HDMI_IOC_MAGIC, 14, int)
+#define IOC_AUDIO_CFG _IOWR(HDMI_IOC_MAGIC, 15, int)
+#define IOC_PLUG_STATUS _IOWR(HDMI_IOC_MAGIC, 16, int)
+#define IOC_POWERONOFF _IOWR(HDMI_IOC_MAGIC, 17, int)
+#define IOC_EVENT_WAKEUP _IOWR(HDMI_IOC_MAGIC, 18, int)
+#define IOC_POWERSTATE _IOWR(HDMI_IOC_MAGIC, 19, int)
+
+
+/* HDMI driver */
+void hdmi_event(enum av8100_hdmi_event);
+int hdmi_init(void);
+void hdmi_exit(void);
+
+enum hdmi_event {
+ HDMI_EVENT_NONE = 0x0,
+ HDMI_EVENT_HDMI_PLUGIN = 0x1,
+ HDMI_EVENT_HDMI_PLUGOUT = 0x2,
+ HDMI_EVENT_CEC = 0x4,
+ HDMI_EVENT_HDCP = 0x8,
+ HDMI_EVENT_CECTXERR = 0x10,
+ HDMI_EVENT_WAKEUP = 0x20,
+ HDMI_EVENT_CECTX = 0x40,
+};
+
+enum hdmi_hdcp_auth_type {
+ HDMI_HDCP_AUTH_OFF = 0,
+ HDMI_HDCP_AUTH_START = 1,
+ HDMI_HDCP_AUTH_REV_LIST_REQ = 2,
+ HDMI_HDCP_AUTH_CONT = 3,
+};
+
+enum hdmi_hdcp_encr_type {
+ HDMI_HDCP_ENCR_OESS = 0,
+ HDMI_HDCP_ENCR_EESS = 1,
+};
+
+struct plug_detect {
+ __u8 hdmi_detect_enable;
+ __u8 on_time;
+ __u8 hdmi_off_time;
+};
+
+struct edid_read {
+ __u8 address;
+ __u8 block_nr;
+ __u8 data_length;
+ __u8 data[HDMI_EDID_DATA_SIZE];
+};
+
+struct cec_rw {
+ __u8 src;
+ __u8 dest;
+ __u8 length;
+ __u8 data[HDMI_CEC_SIZE];
+};
+
+struct info_fr {
+ __u8 type;
+ __u8 ver;
+ __u8 crc;
+ __u8 length;
+ __u8 data[HDMI_INFOFR_SIZE];
+};
+
+struct hdcp_fuseaes {
+ __u8 key[HDMI_FUSE_KEYSIZE];
+ __u8 crc;
+ __u8 result;
+};
+
+struct hdcp_loadaesall {
+ __u8 key[HDMI_AES_KEYSIZE];
+ __u8 ksv[HDMI_AES_KSVSIZE];
+ __u8 crc32[HDMI_CRC32_SIZE];
+ __u8 result;
+};
+
+
+/* hdcp_authencr resp coding
+ *
+ * When encr_type is 2 (request revocation list), the response is indicated
+ * by resp_size != 0, with resp containing the following:
+ *
+ * __u8[5] Bksv from sink (not belonging to revocation list)
+ * __u8 Device count
+ * Additional output if Nrofdevices > 0:
+ * __u8[5 * Nrofdevices] Bksv per connected equipment
+ * __u8[20] SHA signature
+ *
+ * Device count coding:
+ * 0 = a simple receiver is connected
+ * 0x80 = a repeater is connected without downstream equipment
+ * 0x81 = a repeater is connected with one downstream equipment
+ * up to 0x94 = (0x80 + 0x14) a repeater is connected with downstream
+ * equipment (thus up to 20 connected equipments)
+ * 1 = repeater without sink equipment connected
+ * >1 = number of connected equipment on the repeater
+ * Nrofdevices = Device count & 0x7F (max 20)
+ *
+ * Max resp_size is 5 + 1 + 5 * 20 + 20 = 126 bytes
+ *
+ */
+struct hdcp_authencr {
+ __u8 auth_type;
+ __u8 encr_type;
+ __u8 result;
+ __u8 resp_size;
+ __u8 resp[HDMI_HDCPAUTHRESP_SIZE];
+};
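+
+/*
+ * A short worked example (illustrative only) of the revocation list response
+ * layout described above. With resp_size != 0:
+ *
+ *	resp[0..4]                      Bksv from the sink
+ *	resp[5]                         device count
+ *	n = resp[5] & 0x7F              number of downstream devices (max 20)
+ *	resp[6..6 + 5*n - 1]            one 5 byte Bksv per downstream device
+ *	resp[6 + 5*n..6 + 5*n + 19]     20 byte SHA signature
+ *
+ * which gives the stated maximum size of 5 + 1 + 5 * 20 + 20 = 126 bytes.
+ */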
+
+struct audio_cfg {
+ __u8 if_format;
+ __u8 i2s_entries;
+ __u8 freq;
+ __u8 word_length;
+ __u8 format;
+ __u8 if_mode;
+ __u8 mute;
+};
+
+#endif /* __HDMI__H__ */
diff --git a/include/video/mcde.h b/include/video/mcde.h
new file mode 100644
index 00000000000..b81d88af5fd
--- /dev/null
+++ b/include/video/mcde.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE__H__
+#define __MCDE__H__
+
+/* Physical interface types */
+enum mcde_port_type {
+ MCDE_PORTTYPE_DSI = 0,
+ MCDE_PORTTYPE_DPI = 1,
+};
+
+/* Interface mode */
+enum mcde_port_mode {
+ MCDE_PORTMODE_CMD = 0,
+ MCDE_PORTMODE_VID = 1,
+};
+
+/* MCDE fifos */
+enum mcde_fifo {
+ MCDE_FIFO_A = 0,
+ MCDE_FIFO_B = 1,
+ MCDE_FIFO_C0 = 2,
+ MCDE_FIFO_C1 = 3,
+};
+
+/* MCDE channels (pixel pipelines) */
+enum mcde_chnl {
+ MCDE_CHNL_A = 0,
+ MCDE_CHNL_B = 1,
+ MCDE_CHNL_C0 = 2,
+ MCDE_CHNL_C1 = 3,
+};
+
+/* Channel path */
+#define MCDE_CHNLPATH(__chnl, __fifo, __type, __ifc, __link) \
+ (((__chnl) << 16) | ((__fifo) << 12) | \
+ ((__type) << 8) | ((__ifc) << 4) | ((__link) << 0))
+enum mcde_chnl_path {
+ /* Channel A */
+ MCDE_CHNLPATH_CHNLA_FIFOA_DPI_0 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DPI, 0, 0),
+ MCDE_CHNLPATH_CHNLA_FIFOA_DSI_IFC0_0 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DSI, 0, 0),
+ MCDE_CHNLPATH_CHNLA_FIFOA_DSI_IFC0_1 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DSI, 0, 1),
+ MCDE_CHNLPATH_CHNLA_FIFOC0_DSI_IFC0_2 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_C0, MCDE_PORTTYPE_DSI, 0, 2),
+ MCDE_CHNLPATH_CHNLA_FIFOC0_DSI_IFC1_0 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_C0, MCDE_PORTTYPE_DSI, 1, 0),
+ MCDE_CHNLPATH_CHNLA_FIFOC0_DSI_IFC1_1 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_C0, MCDE_PORTTYPE_DSI, 1, 1),
+ MCDE_CHNLPATH_CHNLA_FIFOA_DSI_IFC1_2 = MCDE_CHNLPATH(MCDE_CHNL_A,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DSI, 1, 2),
+ /* Channel B */
+ MCDE_CHNLPATH_CHNLB_FIFOB_DPI_1 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DPI, 0, 1),
+ MCDE_CHNLPATH_CHNLB_FIFOB_DSI_IFC0_0 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DSI, 0, 0),
+ MCDE_CHNLPATH_CHNLB_FIFOB_DSI_IFC0_1 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DSI, 0, 1),
+ MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC0_2 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_C1, MCDE_PORTTYPE_DSI, 0, 2),
+ MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC1_0 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_C1, MCDE_PORTTYPE_DSI, 1, 0),
+ MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC1_1 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_C1, MCDE_PORTTYPE_DSI, 1, 1),
+ MCDE_CHNLPATH_CHNLB_FIFOB_DSI_IFC1_2 = MCDE_CHNLPATH(MCDE_CHNL_B,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DSI, 1, 2),
+ /* Channel C0 */
+ MCDE_CHNLPATH_CHNLC0_FIFOA_DSI_IFC0_0 = MCDE_CHNLPATH(MCDE_CHNL_C0,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DSI, 0, 0),
+ MCDE_CHNLPATH_CHNLC0_FIFOA_DSI_IFC0_1 = MCDE_CHNLPATH(MCDE_CHNL_C0,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DSI, 0, 1),
+ MCDE_CHNLPATH_CHNLC0_FIFOC0_DSI_IFC0_2 = MCDE_CHNLPATH(MCDE_CHNL_C0,
+ MCDE_FIFO_C0, MCDE_PORTTYPE_DSI, 0, 2),
+ MCDE_CHNLPATH_CHNLC0_FIFOC0_DSI_IFC1_0 = MCDE_CHNLPATH(MCDE_CHNL_C0,
+ MCDE_FIFO_C0, MCDE_PORTTYPE_DSI, 1, 0),
+ MCDE_CHNLPATH_CHNLC0_FIFOC0_DSI_IFC1_1 = MCDE_CHNLPATH(MCDE_CHNL_C0,
+ MCDE_FIFO_C0, MCDE_PORTTYPE_DSI, 1, 1),
+ MCDE_CHNLPATH_CHNLC0_FIFOA_DSI_IFC1_2 = MCDE_CHNLPATH(MCDE_CHNL_C0,
+ MCDE_FIFO_A, MCDE_PORTTYPE_DSI, 1, 2),
+ /* Channel C1 */
+ MCDE_CHNLPATH_CHNLC1_FIFOB_DSI_IFC0_0 = MCDE_CHNLPATH(MCDE_CHNL_C1,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DSI, 0, 0),
+ MCDE_CHNLPATH_CHNLC1_FIFOB_DSI_IFC0_1 = MCDE_CHNLPATH(MCDE_CHNL_C1,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DSI, 0, 1),
+ MCDE_CHNLPATH_CHNLC1_FIFOC1_DSI_IFC0_2 = MCDE_CHNLPATH(MCDE_CHNL_C1,
+ MCDE_FIFO_C1, MCDE_PORTTYPE_DSI, 0, 2),
+ MCDE_CHNLPATH_CHNLC1_FIFOC1_DSI_IFC1_0 = MCDE_CHNLPATH(MCDE_CHNL_C1,
+ MCDE_FIFO_C1, MCDE_PORTTYPE_DSI, 1, 0),
+ MCDE_CHNLPATH_CHNLC1_FIFOC1_DSI_IFC1_1 = MCDE_CHNLPATH(MCDE_CHNL_C1,
+ MCDE_FIFO_C1, MCDE_PORTTYPE_DSI, 1, 1),
+ MCDE_CHNLPATH_CHNLC1_FIFOB_DSI_IFC1_2 = MCDE_CHNLPATH(MCDE_CHNL_C1,
+ MCDE_FIFO_B, MCDE_PORTTYPE_DSI, 1, 2),
+};
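+
+/*
+ * Worked example of the MCDE_CHNLPATH() encoding (illustrative only):
+ * MCDE_CHNLPATH_CHNLB_FIFOC1_DSI_IFC1_0 is
+ * (MCDE_CHNL_B << 16) | (MCDE_FIFO_C1 << 12) | (MCDE_PORTTYPE_DSI << 8) |
+ * (1 << 4) | 0 = 0x13010, i.e. channel B through fifo C1 to DSI interface 1
+ * on link 0.
+ */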
+
+/* Update sync mode */
+enum mcde_sync_src {
+ MCDE_SYNCSRC_OFF = 0, /* No sync */
+ MCDE_SYNCSRC_TE0 = 1, /* MCDE ext TE0 */
+ MCDE_SYNCSRC_TE1 = 2, /* MCDE ext TE1 */
+ MCDE_SYNCSRC_BTA = 3, /* DSI BTA */
+ MCDE_SYNCSRC_TE_POLLING = 4, /* DSI TE_POLLING */
+};
+
+/* Interface pixel formats (output) */
+/*
+ * REVIEW: Define formats
+ * Add explanatory comments on how the formats are ordered in memory
+ */
+enum mcde_port_pix_fmt {
+ /* MIPI standard formats */
+
+ MCDE_PORTPIXFMT_DPI_16BPP_C1 = 0x21,
+ MCDE_PORTPIXFMT_DPI_16BPP_C2 = 0x22,
+ MCDE_PORTPIXFMT_DPI_16BPP_C3 = 0x23,
+ MCDE_PORTPIXFMT_DPI_18BPP_C1 = 0x24,
+ MCDE_PORTPIXFMT_DPI_18BPP_C2 = 0x25,
+ MCDE_PORTPIXFMT_DPI_24BPP = 0x26,
+
+ MCDE_PORTPIXFMT_DSI_16BPP = 0x31,
+ MCDE_PORTPIXFMT_DSI_18BPP = 0x32,
+ MCDE_PORTPIXFMT_DSI_18BPP_PACKED = 0x33,
+ MCDE_PORTPIXFMT_DSI_24BPP = 0x34,
+
+ /* Custom formats */
+ MCDE_PORTPIXFMT_DSI_YCBCR422 = 0x40,
+};
+
+enum mcde_hdmi_sdtv_switch {
+ HDMI_SWITCH,
+ SDTV_SWITCH,
+ DVI_SWITCH
+};
+
+enum mcde_col_convert {
+ MCDE_CONVERT_RGB_2_RGB,
+ MCDE_CONVERT_RGB_2_YCBCR,
+ MCDE_CONVERT_YCBCR_2_RGB,
+ MCDE_CONVERT_YCBCR_2_YCBCR,
+};
+
+struct mcde_col_transform {
+ u16 matrix[3][3];
+ u16 offset[3];
+};
+
+#define MCDE_PORT_DPI_NO_CLOCK_DIV 0
+
+#define DPI_ACT_HIGH_ALL 0 /* all signals are active high */
+#define DPI_ACT_LOW_HSYNC 1 /* horizontal sync signal is active low */
+#define DPI_ACT_LOW_VSYNC 2 /* vertical sync signal is active low */
+#define DPI_ACT_LOW_DATA_ENABLE 4 /* data enable signal is active low */
+#define DPI_ACT_ON_FALLING_EDGE 8 /* drive data on the falling edge of the
+ * pixel clock
+ */
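+
+/*
+ * The DPI_ACT_* values are bit flags and may be OR'ed together into the
+ * polarity field of mcde_port.phy.dpi, for example
+ * (DPI_ACT_LOW_HSYNC | DPI_ACT_LOW_VSYNC) for active low sync signals.
+ */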
+
+struct mcde_port {
+ enum mcde_port_type type;
+ enum mcde_port_mode mode;
+ enum mcde_port_pix_fmt pixel_format;
+ u8 ifc;
+ u8 link;
+ enum mcde_sync_src sync_src;
+ bool update_auto_trig;
+ enum mcde_hdmi_sdtv_switch hdmi_sdtv_switch;
+ union {
+ struct {
+ u8 virt_id;
+ u8 num_data_lanes;
+ u8 ui;
+ bool clk_cont;
+
+ /* DSI data lanes are swapped if true */
+ bool data_lanes_swap;
+ } dsi;
+ struct {
+ u8 bus_width;
+ bool tv_mode;
+ u16 clock_div; /* use 0 or 1 for no clock divider */
+ u32 polarity; /* see DPI_ACT_LOW_* definitions */
+ } dpi;
+ } phy;
+};
+
+/* Overlay pixel formats (input) */ /* REVIEW: Define byte order */
+enum mcde_ovly_pix_fmt {
+ MCDE_OVLYPIXFMT_RGB565 = 1,
+ MCDE_OVLYPIXFMT_RGBA5551 = 2,
+ MCDE_OVLYPIXFMT_RGBA4444 = 3,
+ MCDE_OVLYPIXFMT_RGB888 = 4,
+ MCDE_OVLYPIXFMT_RGBX8888 = 5,
+ MCDE_OVLYPIXFMT_RGBA8888 = 6,
+ MCDE_OVLYPIXFMT_YCbCr422 = 7,
+};
+
+/* Display power modes */
+enum mcde_display_power_mode {
+ MCDE_DISPLAY_PM_OFF = 0, /* Power off */
+ MCDE_DISPLAY_PM_STANDBY = 1, /* DCS sleep mode */
+ MCDE_DISPLAY_PM_ON = 2, /* DCS normal mode, display on */
+};
+
+/* Display rotation */
+enum mcde_display_rotation {
+ MCDE_DISPLAY_ROT_0 = 0,
+ MCDE_DISPLAY_ROT_90_CCW = 90,
+ MCDE_DISPLAY_ROT_180_CCW = 180,
+ MCDE_DISPLAY_ROT_270_CCW = 270,
+ MCDE_DISPLAY_ROT_90_CW = MCDE_DISPLAY_ROT_270_CCW,
+ MCDE_DISPLAY_ROT_180_CW = MCDE_DISPLAY_ROT_180_CCW,
+ MCDE_DISPLAY_ROT_270_CW = MCDE_DISPLAY_ROT_90_CCW,
+};
+
+/* REVIEW: Verify */
+#define MCDE_MIN_WIDTH 16
+#define MCDE_MIN_HEIGHT 16
+#define MCDE_MAX_WIDTH 2048
+#define MCDE_MAX_HEIGHT 2048
+#define MCDE_BUF_START_ALIGMENT 8
+#define MCDE_BUF_LINE_ALIGMENT 8
+
+#define MCDE_FIFO_AB_SIZE 640
+#define MCDE_FIFO_C0C1_SIZE 160
+
+#define MCDE_INPUT_FIFO_SIZE_4_0_4 80
+#define MCDE_INPUT_FIFO_SIZE_3_0_8 128
+
+/* Tv-out defines */
+#define MCDE_CONFIG_TVOUT_HBORDER 2
+#define MCDE_CONFIG_TVOUT_VBORDER 2
+#define MCDE_CONFIG_TVOUT_BACKGROUND_LUMINANCE 0x83
+#define MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CB 0x9C
+#define MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CR 0x2C
+
+/* In seconds */
+#define MCDE_AUTO_SYNC_WATCHDOG 5
+
+/* Hardware versions */
+#define MCDE_CHIP_VERSION_4_0_4 4 /* U5500 V2 */
+#define MCDE_CHIP_VERSION_1_0_4 3 /* U5500 V1 */
+#define MCDE_CHIP_VERSION_3_0_8 2 /* U8500 V2 */
+#define MCDE_CHIP_VERSION_3_0_5 1 /* U8500 V1 */
+#define MCDE_CHIP_VERSION_3 0
+
+/* DSI modes */
+#define DSI_VIDEO_MODE 0
+#define DSI_CMD_MODE 1
+
+/* Video mode descriptor */
+struct mcde_video_mode {
+ u32 xres;
+ u32 yres;
+ u32 pixclock; /* pixel clock in ps (pico seconds) */
+ u32 hbp; /* horizontal back porch: left margin (excl. hsync) */
+ u32 hfp; /* horizontal front porch: right margin (excl. hsync) */
+ u32 hsw; /* horizontal sync width */
+ u32 vbp; /* vertical back porch: upper margin (excl. vsync) */
+ u32 vfp; /* vertical front porch: lower margin (excl. vsync) */
+ u32 vsw; /* vertical sync width*/
+ bool interlaced;
+ bool force_update; /* when switching between hdmi and sdtv */
+};
+
+struct mcde_rectangle {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+struct mcde_overlay_info {
+ u32 paddr;
+ u32 *vaddr;
+ u16 stride; /* buffer line len in bytes */
+ enum mcde_ovly_pix_fmt fmt;
+
+ u16 src_x;
+ u16 src_y;
+ u16 dst_x;
+ u16 dst_y;
+ u16 dst_z;
+ u16 w;
+ u16 h;
+ struct mcde_rectangle dirty;
+};
+
+struct mcde_overlay {
+ struct kobject kobj;
+ struct list_head list; /* mcde_display_device.ovlys */
+
+ struct mcde_display_device *ddev;
+ struct mcde_overlay_info info;
+ struct mcde_ovly_state *state;
+};
+
+/*
+ * Three functions for mapping 8-bit colour channels onto 12-bit colour
+ * channels. The colour channels (ch0, ch1, ch2) can represent (r, g, b) or
+ * (Y, Cb, Cr) respectively.
+ */
+struct mcde_palette_table {
+ u16 (*map_col_ch0)(u8);
+ u16 (*map_col_ch1)(u8);
+ u16 (*map_col_ch2)(u8);
+};
+
+struct mcde_chnl_state;
+
+struct mcde_chnl_state *mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port);
+int mcde_chnl_set_pixel_format(struct mcde_chnl_state *chnl,
+ enum mcde_port_pix_fmt pix_fmt);
+int mcde_chnl_set_palette(struct mcde_chnl_state *chnl,
+ struct mcde_palette_table *palette);
+void mcde_chnl_set_col_convert(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform,
+ enum mcde_col_convert convert);
+int mcde_chnl_set_video_mode(struct mcde_chnl_state *chnl,
+ struct mcde_video_mode *vmode);
+/* TODO: Remove rotbuf* parameters when ESRAM allocator is implemented */
+int mcde_chnl_set_rotation(struct mcde_chnl_state *chnl,
+ enum mcde_display_rotation rotation, u32 rotbuf1, u32 rotbuf2);
+int mcde_chnl_enable_synchronized_update(struct mcde_chnl_state *chnl,
+ bool enable);
+int mcde_chnl_set_power_mode(struct mcde_chnl_state *chnl,
+ enum mcde_display_power_mode power_mode);
+
+int mcde_chnl_apply(struct mcde_chnl_state *chnl);
+int mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer);
+void mcde_chnl_put(struct mcde_chnl_state *chnl);
+
+void mcde_chnl_stop_flow(struct mcde_chnl_state *chnl);
+
+void mcde_chnl_enable(struct mcde_chnl_state *chnl);
+void mcde_chnl_disable(struct mcde_chnl_state *chnl);
+
+/* MCDE overlay */
+struct mcde_ovly_state;
+
+struct mcde_ovly_state *mcde_ovly_get(struct mcde_chnl_state *chnl);
+void mcde_ovly_set_source_buf(struct mcde_ovly_state *ovly,
+ u32 paddr);
+void mcde_ovly_set_source_info(struct mcde_ovly_state *ovly,
+ u32 stride, enum mcde_ovly_pix_fmt pix_fmt);
+void mcde_ovly_set_source_area(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u16 w, u16 h);
+void mcde_ovly_set_dest_pos(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u8 z);
+void mcde_ovly_apply(struct mcde_ovly_state *ovly);
+void mcde_ovly_put(struct mcde_ovly_state *ovly);
+
+/* MCDE dsi */
+
+#define DCS_CMD_ENTER_IDLE_MODE 0x39
+#define DCS_CMD_ENTER_INVERT_MODE 0x21
+#define DCS_CMD_ENTER_NORMAL_MODE 0x13
+#define DCS_CMD_ENTER_PARTIAL_MODE 0x12
+#define DCS_CMD_ENTER_SLEEP_MODE 0x10
+#define DCS_CMD_EXIT_IDLE_MODE 0x38
+#define DCS_CMD_EXIT_INVERT_MODE 0x20
+#define DCS_CMD_EXIT_SLEEP_MODE 0x11
+#define DCS_CMD_GET_ADDRESS_MODE 0x0B
+#define DCS_CMD_GET_BLUE_CHANNEL 0x08
+#define DCS_CMD_GET_DIAGNOSTIC_RESULT 0x0F
+#define DCS_CMD_GET_DISPLAY_MODE 0x0D
+#define DCS_CMD_GET_GREEN_CHANNEL 0x07
+#define DCS_CMD_GET_PIXEL_FORMAT 0x0C
+#define DCS_CMD_GET_POWER_MODE 0x0A
+#define DCS_CMD_GET_RED_CHANNEL 0x06
+#define DCS_CMD_GET_SCANLINE 0x45
+#define DCS_CMD_GET_SIGNAL_MODE 0x0E
+#define DCS_CMD_NOP 0x00
+#define DCS_CMD_READ_DDB_CONTINUE 0xA8
+#define DCS_CMD_READ_DDB_START 0xA1
+#define DCS_CMD_READ_MEMORY_CONTINE 0x3E
+#define DCS_CMD_READ_MEMORY_START 0x2E
+#define DCS_CMD_SET_ADDRESS_MODE 0x36
+#define DCS_CMD_SET_COLUMN_ADDRESS 0x2A
+#define DCS_CMD_SET_DISPLAY_OFF 0x28
+#define DCS_CMD_SET_DISPLAY_ON 0x29
+#define DCS_CMD_SET_GAMMA_CURVE 0x26
+#define DCS_CMD_SET_PAGE_ADDRESS 0x2B
+#define DCS_CMD_SET_PARTIAL_AREA 0x30
+#define DCS_CMD_SET_PIXEL_FORMAT 0x3A
+#define DCS_CMD_SET_SCROLL_AREA 0x33
+#define DCS_CMD_SET_SCROLL_START 0x37
+#define DCS_CMD_SET_TEAR_OFF 0x34
+#define DCS_CMD_SET_TEAR_ON 0x35
+#define DCS_CMD_SET_TEAR_SCANLINE 0x44
+#define DCS_CMD_SOFT_RESET 0x01
+#define DCS_CMD_WRITE_LUT 0x2D
+#define DCS_CMD_WRITE_CONTINUE 0x3C
+#define DCS_CMD_WRITE_START 0x2C
+
+#define MCDE_MAX_DCS_READ 4
+#define MCDE_MAX_DSI_DIRECT_CMD_WRITE 15
+
+int mcde_dsi_generic_write(struct mcde_chnl_state *chnl, u8 *para, int len);
+int mcde_dsi_dcs_write(struct mcde_chnl_state *chnl,
+ u8 cmd, u8 *data, int len);
+int mcde_dsi_dcs_read(struct mcde_chnl_state *chnl,
+ u8 cmd, u32 *data, int *len);
+int mcde_dsi_set_max_pkt_size(struct mcde_chnl_state *chnl);
+
+/* MCDE */
+
+/* Driver data */
+#define MCDE_IRQ "MCDE IRQ"
+#define MCDE_IO_AREA "MCDE I/O Area"
+
+struct mcde_platform_data {
+ /* DSI */
+ int num_dsilinks;
+
+ /* DPI */
+ u8 outmux[5]; /* MCDE_CONF0.OUTMUXx */
+ u8 syncmux; /* MCDE_CONF0.SYNCMUXx */
+
+ const char *regulator_vana_id;
+ const char *regulator_mcde_epod_id;
+ const char *regulator_esram_epod_id;
+ int num_channels;
+ int num_overlays;
+ const char *clock_dsi_id;
+ const char *clock_dsi_lp_id;
+ const char *clock_dpi_id;
+ const char *clock_mcde_id;
+
+ int (*platform_set_clocks)(void);
+ int (*platform_enable_dsipll)(void);
+ int (*platform_disable_dsipll)(void);
+};
+
+int mcde_init(void);
+void mcde_exit(void);
+
+#endif /* __MCDE__H__ */
diff --git a/include/video/mcde_display-ab8500.h b/include/video/mcde_display-ab8500.h
new file mode 100644
index 00000000000..ffebe62af92
--- /dev/null
+++ b/include/video/mcde_display-ab8500.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 tvout driver interface
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __DISPLAY_AB8500__H__
+#define __DISPLAY_AB8500__H__
+
+#include <video/mcde.h>
+
+struct ab8500_display_platform_data {
+ /* Platform info */
+ struct mcde_col_transform *rgb_2_yCbCr_transform;
+ int nr_regulators;
+ const char *regulator_id[];
+};
+
+#endif /* __DISPLAY_AB8500__H__*/
+
diff --git a/include/video/mcde_display-av8100.h b/include/video/mcde_display-av8100.h
new file mode 100644
index 00000000000..52578a675f1
--- /dev/null
+++ b/include/video/mcde_display-av8100.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE HDMI display driver
+ *
+ * Author: Per Persson <per-xb-persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __DISPLAY_AV8100__H__
+#define __DISPLAY_AV8100__H__
+
+#include <linux/regulator/consumer.h>
+
+#include "mcde_display.h"
+
+#define GPIO_AV8100_RSTN 196
+#define NATIVE_XRES_HDMI 1280
+#define NATIVE_YRES_HDMI 720
+#define NATIVE_XRES_SDTV 720
+#define NATIVE_YRES_SDTV 576
+#define DISPONOFF_SIZE 6
+#define TIMING_SIZE 2
+#define STAYALIVE_SIZE 1
+
+struct mcde_display_hdmi_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ bool reset_high;
+ const char *regulator_id;
+ const char *cvbs_regulator_id;
+ int reset_delay; /* ms */
+ u32 ddb_id;
+ struct mcde_col_transform *rgb_2_yCbCr_transform;
+
+ /* Driver data */ /* TODO: move to driver data instead */
+ bool hdmi_platform_enable;
+ struct regulator *regulator;
+};
+
+struct display_driver_data {
+ struct regulator *cvbs_regulator;
+ bool cvbs_regulator_enabled;
+ bool update_port_pixel_format;
+ char *fbdevname;
+ struct mcde_video_mode *video_mode;
+};
+
+void hdmi_fb_onoff(struct mcde_display_device *ddev, bool enable,
+ u8 cea, u8 vesa_cea_nr);
+
+#endif /* __DISPLAY_AV8100__H__ */
diff --git a/include/video/mcde_display-generic_dsi.h b/include/video/mcde_display-generic_dsi.h
new file mode 100644
index 00000000000..87ef6baf67a
--- /dev/null
+++ b/include/video/mcde_display-generic_dsi.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE generic DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DISPLAY_GENERIC__H__
+#define __MCDE_DISPLAY_GENERIC__H__
+
+#include <linux/regulator/consumer.h>
+
+#include "mcde_display.h"
+
+struct mcde_display_generic_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ bool reset_high;
+ const char *regulator_id;
+ int reset_delay; /* ms */
+ int sleep_out_delay; /* ms */
+ u32 ddb_id;
+
+ /* Driver data */
+ bool generic_platform_enable;
+ struct regulator *regulator;
+ int max_supply_voltage;
+ int min_supply_voltage;
+};
+
+#endif /* __MCDE_DISPLAY_GENERIC__H__ */
+
diff --git a/include/video/mcde_display-sony_acx424akp_dsi.h b/include/video/mcde_display-sony_acx424akp_dsi.h
new file mode 100644
index 00000000000..9f656fdcc3b
--- /dev/null
+++ b/include/video/mcde_display-sony_acx424akp_dsi.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Sony acx424akp DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DISPLAY_SONY_ACX424AKP__H__
+#define __MCDE_DISPLAY_SONY_ACX424AKP__H__
+
+enum display_panel_type {
+ DISPLAY_NONE = 0,
+ DISPLAY_SONY_ACX424AKP = 0x1b81,
+};
+
+struct mcde_display_sony_acx424akp_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ enum display_panel_type disp_panel; /* display panel types */
+};
+
+#endif /* __MCDE_DISPLAY_SONY_ACX424AKP__H__ */
+
diff --git a/include/video/mcde_display-vuib500-dpi.h b/include/video/mcde_display-vuib500-dpi.h
new file mode 100644
index 00000000000..94bad83bf97
--- /dev/null
+++ b/include/video/mcde_display-vuib500-dpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE DPI display driver
+ *
+ * Author: Torbjorn Svensson <torbjorn.x.svensson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __MCDE_DISPLAY_DPI__H__
+#define __MCDE_DISPLAY_DPI__H__
+
+#include <linux/regulator/consumer.h>
+
+#include "mcde_display.h"
+
+struct mcde_display_dpi_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ bool reset_high;
+ const char *regulator_id;
+ int reset_delay;
+
+ /* Driver data */
+ struct regulator *regulator;
+ int max_supply_voltage;
+ int min_supply_voltage;
+};
+#endif /* __MCDE_DISPLAY_DPI__H__ */
diff --git a/include/video/mcde_display.h b/include/video/mcde_display.h
new file mode 100644
index 00000000000..97326cbc28c
--- /dev/null
+++ b/include/video/mcde_display.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DISPLAY__H__
+#define __MCDE_DISPLAY__H__
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+#include <video/mcde.h>
+
+#define UPDATE_FLAG_PIXEL_FORMAT 0x1
+#define UPDATE_FLAG_VIDEO_MODE 0x2
+#define UPDATE_FLAG_ROTATION 0x4
+
+struct mcde_display_dsi_platform_data {
+ int reset_gpio;
+ int link;
+};
+
+#define to_mcde_display_device(__dev) \
+ container_of((__dev), struct mcde_display_device, dev)
+
+struct mcde_display_device {
+ /* MCDE driver static */
+ struct device dev;
+ const char *name;
+ int id;
+ struct mcde_port *port;
+ struct fb_info *fbi;
+ bool fictive;
+
+ /* MCDE dss driver internal */
+ bool initialized;
+ enum mcde_chnl chnl_id;
+ enum mcde_fifo fifo;
+ bool first_update;
+ struct mutex display_lock;
+
+ bool enabled;
+ struct mcde_chnl_state *chnl_state;
+ struct list_head ovlys;
+ struct mcde_rectangle update_area;
+ /* TODO: Remove once ESRAM allocator is done */
+ u32 rotbuf1;
+ u32 rotbuf2;
+
+ /* Display driver internal */
+ u16 native_x_res;
+ u16 native_y_res;
+ u16 physical_width;
+ u16 physical_height;
+ enum mcde_display_power_mode power_mode;
+ enum mcde_ovly_pix_fmt default_pixel_format;
+ enum mcde_ovly_pix_fmt pixel_format;
+ enum mcde_display_rotation rotation;
+ bool synchronized_update;
+ struct mcde_video_mode video_mode;
+ int update_flags;
+ bool stay_alive;
+ int check_transparency;
+
+ /* Driver API */
+ void (*get_native_resolution)(struct mcde_display_device *dev,
+ u16 *x_res, u16 *y_res);
+ enum mcde_ovly_pix_fmt (*get_default_pixel_format)(
+ struct mcde_display_device *dev);
+ void (*get_physical_size)(struct mcde_display_device *dev,
+ u16 *x_size, u16 *y_size);
+
+ int (*set_power_mode)(struct mcde_display_device *dev,
+ enum mcde_display_power_mode power_mode);
+ enum mcde_display_power_mode (*get_power_mode)(
+ struct mcde_display_device *dev);
+
+ int (*try_video_mode)(struct mcde_display_device *dev,
+ struct mcde_video_mode *video_mode);
+ int (*set_video_mode)(struct mcde_display_device *dev,
+ struct mcde_video_mode *video_mode);
+ void (*get_video_mode)(struct mcde_display_device *dev,
+ struct mcde_video_mode *video_mode);
+ int (*set_pixel_format)(struct mcde_display_device *dev,
+ enum mcde_ovly_pix_fmt pix_fmt);
+ enum mcde_ovly_pix_fmt (*get_pixel_format)(
+ struct mcde_display_device *dev);
+ enum mcde_port_pix_fmt (*get_port_pixel_format)(
+ struct mcde_display_device *dev);
+
+ int (*set_rotation)(struct mcde_display_device *dev,
+ enum mcde_display_rotation rotation);
+ enum mcde_display_rotation (*get_rotation)(
+ struct mcde_display_device *dev);
+
+ int (*set_synchronized_update)(struct mcde_display_device *dev,
+ bool enable);
+ bool (*get_synchronized_update)(struct mcde_display_device *dev);
+
+ int (*apply_config)(struct mcde_display_device *dev);
+ int (*invalidate_area)(struct mcde_display_device *dev,
+ struct mcde_rectangle *area);
+ int (*update)(struct mcde_display_device *dev, bool tripple_buffer);
+ int (*on_first_update)(struct mcde_display_device *dev);
+ int (*platform_enable)(struct mcde_display_device *dev);
+ int (*platform_disable)(struct mcde_display_device *dev);
+ int (*ceanr_convert)(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, int buffering,
+ u16 *w, u16 *h, u16 *vw, u16 *vh);
+};
+
+struct mcde_display_driver {
+ int (*probe)(struct mcde_display_device *dev);
+ int (*remove)(struct mcde_display_device *dev);
+ void (*shutdown)(struct mcde_display_device *dev);
+ int (*suspend)(struct mcde_display_device *dev,
+ pm_message_t state);
+ int (*resume)(struct mcde_display_device *dev);
+
+ struct device_driver driver;
+};
+
+/* MCDE dsi (Used by MCDE display drivers) */
+
+int mcde_display_dsi_dcs_write(struct mcde_display_device *dev,
+ u8 cmd, u8 *data, int len);
+int mcde_display_dsi_dcs_read(struct mcde_display_device *dev,
+ u8 cmd, u8 *data, int *len);
+int mcde_display_dsi_bta_sync(struct mcde_display_device *dev);
+
+/* MCDE display bus */
+
+int mcde_display_driver_register(struct mcde_display_driver *drv);
+void mcde_display_driver_unregister(struct mcde_display_driver *drv);
+int mcde_display_device_register(struct mcde_display_device *dev);
+void mcde_display_device_unregister(struct mcde_display_device *dev);
+
+void mcde_display_init_device(struct mcde_display_device *dev);
+
+int mcde_display_init(void);
+void mcde_display_exit(void);
+
+#endif /* __MCDE_DISPLAY__H__ */
+
diff --git a/include/video/mcde_dss.h b/include/video/mcde_dss.h
new file mode 100644
index 00000000000..efed79ad023
--- /dev/null
+++ b/include/video/mcde_dss.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display sub system driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DSS__H__
+#define __MCDE_DSS__H__
+
+#include <linux/kobject.h>
+#include <linux/notifier.h>
+
+#include "mcde.h"
+#include "mcde_display.h"
+
+/* Public MCDE dss (Used by MCDE fb ioctl & MCDE display sysfs) */
+int mcde_dss_open_channel(struct mcde_display_device *ddev);
+void mcde_dss_close_channel(struct mcde_display_device *ddev);
+int mcde_dss_enable_display(struct mcde_display_device *ddev);
+void mcde_dss_disable_display(struct mcde_display_device *ddev);
+int mcde_dss_apply_channel(struct mcde_display_device *ddev);
+struct mcde_overlay *mcde_dss_create_overlay(struct mcde_display_device *ddev,
+ struct mcde_overlay_info *info);
+void mcde_dss_destroy_overlay(struct mcde_overlay *ovl);
+int mcde_dss_enable_overlay(struct mcde_overlay *ovl);
+void mcde_dss_disable_overlay(struct mcde_overlay *ovl);
+int mcde_dss_apply_overlay(struct mcde_overlay *ovl,
+ struct mcde_overlay_info *info);
+void mcde_dss_get_overlay_info(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info);
+int mcde_dss_update_overlay(struct mcde_overlay *ovl, bool tripple_buffer);
+
+void mcde_dss_get_native_resolution(struct mcde_display_device *ddev,
+ u16 *x_res, u16 *y_res);
+enum mcde_ovly_pix_fmt mcde_dss_get_default_color_format(
+ struct mcde_display_device *ddev);
+void mcde_dss_get_physical_size(struct mcde_display_device *ddev,
+ u16 *x_size, u16 *y_size); /* mm */
+
+int mcde_dss_try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+int mcde_dss_set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+void mcde_dss_get_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+
+int mcde_dss_set_pixel_format(struct mcde_display_device *ddev,
+ enum mcde_ovly_pix_fmt pix_fmt);
+int mcde_dss_get_pixel_format(struct mcde_display_device *ddev);
+
+int mcde_dss_set_rotation(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation);
+enum mcde_display_rotation mcde_dss_get_rotation(
+ struct mcde_display_device *ddev);
+
+int mcde_dss_set_synchronized_update(struct mcde_display_device *ddev,
+ bool enable);
+bool mcde_dss_get_synchronized_update(struct mcde_display_device *ddev);
+
+/* MCDE dss events */
+
+/* A display device and driver has been loaded, probed and bound */
+#define MCDE_DSS_EVENT_DISPLAY_REGISTERED 1
+/* A display device has been removed */
+#define MCDE_DSS_EVENT_DISPLAY_UNREGISTERED 2
+
+/* Note! Notifier callback will be called holding the dev sem */
+int mcde_dss_register_notifier(struct notifier_block *nb);
+int mcde_dss_unregister_notifier(struct notifier_block *nb);
+
+/* MCDE dss driver */
+
+int mcde_dss_init(void);
+void mcde_dss_exit(void);
+
+#endif /* __MCDE_DSS__H__ */
+
diff --git a/include/video/mcde_fb.h b/include/video/mcde_fb.h
new file mode 100644
index 00000000000..17556414aa0
--- /dev/null
+++ b/include/video/mcde_fb.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display sub system frame buffer driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_FB__H__
+#define __MCDE_FB__H__
+
+#include <linux/fb.h>
+#include <linux/ioctl.h>
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#include <linux/hwmem.h>
+#endif
+
+#ifdef __KERNEL__
+#include "mcde_dss.h"
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#endif
+
+#define MCDE_GET_BUFFER_NAME_IOC _IO('M', 1)
+
+#ifdef __KERNEL__
+#define to_mcde_fb(x) ((struct mcde_fb *)(x)->par)
+
+#define MCDE_FB_MAX_NUM_OVERLAYS 3
+
+struct mcde_fb {
+ int num_ovlys;
+ struct mcde_overlay *ovlys[MCDE_FB_MAX_NUM_OVERLAYS];
+ u32 pseudo_palette[17];
+ enum mcde_ovly_pix_fmt pix_fmt;
+ int id;
+ struct hwmem_alloc *alloc;
+ int alloc_name;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+};
+
+/* MCDE fbdev API */
+struct fb_info *mcde_fb_create(struct mcde_display_device *ddev,
+ uint16_t w, uint16_t h, uint16_t vw, uint16_t vh,
+ enum mcde_ovly_pix_fmt pix_fmt, uint32_t rotate);
+
+int mcde_fb_attach_overlay(struct fb_info *fb_info,
+ struct mcde_overlay *ovl);
+void mcde_fb_destroy(struct mcde_display_device *ddev);
+
+/* MCDE fb driver */
+int mcde_fb_init(void);
+void mcde_fb_exit(void);
+#endif
+
+#endif /* __MCDE_FB__H__ */
+
diff --git a/init/Kconfig b/init/Kconfig
index 43298f9810f..9162a7607e8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1285,6 +1285,15 @@ config PROFILING
Say Y here to enable the extended profiling support mechanisms used
by profilers such as OProfile.
+config BOOTTIME
+	bool "Boot time measurements"
+ default n
+ help
+ Adds sysfs entries (boottime/) with start-up timing information.
+ If CONFIG_DEBUG_FS is enabled, detailed information about the
+	  boot time, including system load during boot, can be extracted.
+	  This information can be visualised with the help of the bootgraph
+	  script.
+
#
# Place an empty function call at each tracepoint site. Can be
# dynamically changed for a probe function.
diff --git a/init/Makefile b/init/Makefile
index 0bf677aa087..6b77be3855f 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -9,6 +9,7 @@ else
obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o
endif
obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
+obj-$(CONFIG_BOOTTIME) += boottime.o
mounts-y := do_mounts.o
mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
diff --git a/init/boottime.c b/init/boottime.c
new file mode 100644
index 00000000000..be73e0ee550
--- /dev/null
+++ b/init/boottime.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009-2010
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * boottime is a tool for collecting start-up timing
+ * information and, together with boot loader support, can
+ * display the total system start-up time.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/boottime.h>
+#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+/*
+ * BOOTTIME_MAX_NAME_LEN is defined to 64 in arch/arm/include/asm/setup.h.
+ * It is not a problem if the two definitions differ.
+ */
+#ifndef BOOTTIME_MAX_NAME_LEN
+#define BOOTTIME_MAX_NAME_LEN 64
+#endif
+
+/*
+ * We have a few static entries, since it is good to have measurement points
+ * before the system is up and running properly.
+ */
+#define NUM_STATIC_BOOTTIME_ENTRIES 16
+
+struct boottime_list {
+ struct list_head list;
+ char name[BOOTTIME_MAX_NAME_LEN];
+	/* Time in us since power on, possibly including the boot loader. */
+ unsigned long time;
+ bool cpu_load;
+ struct cpu_usage_stat cpu_usage[NR_CPUS];
+};
+
+enum boottime_filter_type {
+ BOOTTIME_FILTER_OUT_ZERO,
+ BOOTTIME_FILTER_OUT_LESS_100,
+ BOOTTIME_FILTER_NOTHING,
+};
+
+enum boottime_symbolic_print {
+ BOOTTIME_SYMBOLIC_PRINT,
+ BOOTTIME_NORMAL_PRINT,
+};
+
+enum boottime_cpu_load {
+ BOOTTIME_CPU_LOAD,
+ BOOTTIME_NO_CPU_LOAD,
+};
+
+static LIST_HEAD(boottime_list);
+static __initdata DEFINE_SPINLOCK(boottime_list_lock);
+static __initdata struct boottime_timer boottime_timer;
+static __initdata int num_const_boottime_list;
+static struct boottime_list const_boottime_list[NUM_STATIC_BOOTTIME_ENTRIES];
+static unsigned long time_kernel_done;
+static unsigned long time_bootloader_done;
+static __initdata bool system_up;
+static bool boottime_done;
+
+int __attribute__((weak)) boottime_arch_startup(void)
+{
+ return 0;
+}
+
+int __attribute__((weak)) boottime_bootloader_idle(void)
+{
+ return 0;
+}
+
+static void __init boottime_mark_core(char *name,
+ unsigned long time,
+ enum boottime_symbolic_print symbolic,
+ enum boottime_cpu_load cpu_load)
+{
+ struct boottime_list *b;
+ unsigned long flags = 0;
+ int i;
+
+ if (system_up) {
+ b = kmalloc(sizeof(struct boottime_list), GFP_KERNEL);
+ if (!b) {
+ printk(KERN_ERR
+ "boottime: failed to allocate memory!\n");
+ return;
+ }
+
+ } else {
+ if (num_const_boottime_list < NUM_STATIC_BOOTTIME_ENTRIES) {
+ b = &const_boottime_list[num_const_boottime_list];
+ num_const_boottime_list++;
+ } else {
+ printk(KERN_ERR
+ "boottime: too many early measure points!\n");
+ return;
+ }
+ }
+
+ INIT_LIST_HEAD(&b->list);
+
+ if (symbolic == BOOTTIME_SYMBOLIC_PRINT)
+ snprintf(b->name, BOOTTIME_MAX_NAME_LEN, "%pF", name);
+ else
+ strncpy(b->name, name, BOOTTIME_MAX_NAME_LEN);
+
+ b->name[BOOTTIME_MAX_NAME_LEN - 1] = '\0';
+ b->time = time;
+ b->cpu_load = cpu_load;
+
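+ /* CPU load snapshots use kstat and are only meaningful once the system is up */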
+ if (cpu_load == BOOTTIME_CPU_LOAD && system_up)
+ for_each_possible_cpu(i) {
+ b->cpu_usage[i].system = kstat_cpu(i).cpustat.system;
+ b->cpu_usage[i].idle = kstat_cpu(i).cpustat.idle;
+ b->cpu_usage[i].iowait = kstat_cpu(i).cpustat.iowait;
+ b->cpu_usage[i].irq = kstat_cpu(i).cpustat.irq;
+ /*
+ * TODO: Make sure that user, nice, softirq, steal
+ * and guest are not used during boot
+ */
+ }
+ else
+ b->cpu_load = BOOTTIME_NO_CPU_LOAD;
+
+ if (system_up) {
+ spin_lock_irqsave(&boottime_list_lock, flags);
+ list_add(&b->list, &boottime_list);
+ spin_unlock_irqrestore(&boottime_list_lock, flags);
+ } else {
+ list_add(&b->list, &boottime_list);
+ }
+}
+
+void __init boottime_mark_wtime(char *name, unsigned long time)
+{
+ boottime_mark_core(name, time,
+ BOOTTIME_NORMAL_PRINT,
+ BOOTTIME_NO_CPU_LOAD);
+}
+
+void __ref boottime_mark_symbolic(void *name)
+{
+
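+ /*
+  * __ref: boottime_mark_core() is __init, but it is only reached here
+  * before boottime_done is set, i.e. before init memory is freed.
+  */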
+ if (boottime_done)
+ return;
+
+ if (boottime_timer.get_time)
+ boottime_mark_core((char *) name,
+ boottime_timer.get_time(),
+ BOOTTIME_SYMBOLIC_PRINT,
+ BOOTTIME_CPU_LOAD);
+}
+
+void __init boottime_mark(char *name)
+{
+ if (boottime_timer.get_time)
+ boottime_mark_core(name,
+ boottime_timer.get_time(),
+ BOOTTIME_NORMAL_PRINT,
+ BOOTTIME_CPU_LOAD);
+}
+
+void __init boottime_activate(struct boottime_timer *bt)
+{
+ struct boottime_list *b;
+ int res = 0;
+ unsigned long flags;
+
+ if (bt == NULL) {
+ printk(KERN_ERR
+ "boottime: error: bad configured\n");
+ return;
+ }
+
+ if (bt->get_time == NULL) {
+ printk(KERN_ERR
+ "boottime: error: you must provide a get_time() function\n");
+ return;
+ }
+ memcpy(&boottime_timer, bt, sizeof(struct boottime_timer));
+
+ if (boottime_timer.init)
+ res = boottime_timer.init();
+
+ if (res) {
+ printk(KERN_ERR "boottime: initialization failed\n");
+ return;
+ }
+
+ if (boottime_arch_startup())
+ printk(KERN_ERR
+ "boottime: arch specfic initialization failed\n");
+
+ spin_lock_irqsave(&boottime_list_lock, flags);
+
+ if (!list_empty(&boottime_list)) {
+
+ b = list_first_entry(&boottime_list, struct boottime_list,
+ list);
+ if (b)
+ time_bootloader_done = b->time;
+ }
+
+ spin_unlock_irqrestore(&boottime_list_lock, flags);
+}
+
+void __init boottime_system_up(void)
+{
+ system_up = true;
+}
+
+void __init boottime_deactivate(void)
+{
+ struct boottime_list *b;
+ unsigned long flags;
+
+ boottime_mark("execute_init+0x0/0x0");
+
+ boottime_done = true;
+
+ spin_lock_irqsave(&boottime_list_lock, flags);
+ b = list_first_entry(&boottime_list, struct boottime_list, list);
+ spin_unlock_irqrestore(&boottime_list_lock, flags);
+
+ time_kernel_done = b->time;
+
+ if (boottime_timer.finalize)
+ boottime_timer.finalize();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void boottime_debugfs_load(struct seq_file *s,
+ struct boottime_list *b,
+ struct boottime_list *p)
+{
+ int i;
+ unsigned long total_p, total_b;
+ unsigned long system_total, idle_total, irq_total, iowait_total;
+ unsigned long system_load, idle_load, irq_load, iowait_load;
+
+ for_each_possible_cpu(i) {
+ total_b = (b->cpu_usage[i].system +
+ b->cpu_usage[i].idle +
+ b->cpu_usage[i].iowait +
+ b->cpu_usage[i].irq);
+
+ total_p = (p->cpu_usage[i].system +
+ p->cpu_usage[i].idle +
+ p->cpu_usage[i].iowait +
+ p->cpu_usage[i].irq);
+
+ if (total_b == total_p)
+ continue;
+
+ system_total = b->cpu_usage[i].system - p->cpu_usage[i].system;
+ idle_total = b->cpu_usage[i].idle - p->cpu_usage[i].idle;
+ irq_total = b->cpu_usage[i].irq - p->cpu_usage[i].irq;
+ iowait_total = b->cpu_usage[i].iowait - p->cpu_usage[i].iowait;
+
+ system_load = (100 * system_total / (total_b - total_p));
+ idle_load = (100 * idle_total / (total_b - total_p));
+ irq_load = (100 * irq_total / (total_b - total_p));
+ iowait_load = (100 * iowait_total / (total_b - total_p));
+
+ seq_printf(s,
+ " cpu%d system: %lu%% idle: %lu%% iowait: %lu%% irq: %lu%%",
+ i,
+ system_load,
+ idle_load,
+ iowait_load,
+ irq_load);
+ }
+ seq_printf(s, "\n");
+}
+
+static void boottime_debugfs_print(struct seq_file *s,
+ struct boottime_list *b,
+ struct boottime_list *p)
+{
+ seq_printf(s, "[%5lu.%06lu] calling %s\n",
+ p->time / 1000000,
+ (p->time % 1000000),
+ p->name);
+ seq_printf(s, "[%5lu.%06lu] initcall %s returned 0 after %ld msecs.",
+ b->time / 1000000,
+ (b->time % 1000000),
+ p->name, (b->time - p->time) / 1000);
+
+ if (p->cpu_load == BOOTTIME_NO_CPU_LOAD ||
+ b->cpu_load == BOOTTIME_NO_CPU_LOAD) {
+ seq_printf(s, "\n");
+ return;
+ }
+
+ boottime_debugfs_load(s, b, p);
+}
+
+static int boottime_debugfs_bootgraph_show(struct seq_file *s, void *iter)
+{
+ struct boottime_list *b, *p = NULL, *old_p = NULL;
+ enum boottime_filter_type filter = (int)s->private;
+
+ list_for_each_entry_reverse(b, &boottime_list, list) {
+ if (p) {
+ if (!(filter == BOOTTIME_FILTER_OUT_ZERO &&
+ (b->time - p->time) / 1000 == 0)
+ && !(filter == BOOTTIME_FILTER_OUT_LESS_100 &&
+ (b->time - p->time) < 100 * 1000))
+ boottime_debugfs_print(s, b, p);
+ old_p = p;
+ }
+ p = b;
+ }
+
+ if (filter == BOOTTIME_FILTER_NOTHING && p)
+ boottime_debugfs_print(s, p, p);
+
+ if (p)
+ seq_printf(s, "[%5lu.%06lu] Freeing init memory: 0K\n",
+ p->time / 1000000, p->time % 1000000);
+ return 0;
+}
+
+static int boottime_debugfs_summary_show(struct seq_file *s, void *data)
+{
+ struct boottime_list *b, b_zero;
+
+ if (time_bootloader_done)
+ seq_printf(s, "bootloader: %ld msecs\n",
+ time_bootloader_done / 1000);
+
+ seq_printf(s, "kernel: %ld msecs\ntotal: %ld msecs\n",
+ (time_kernel_done - time_bootloader_done) / 1000,
+ time_kernel_done / 1000);
+ seq_printf(s, "kernel:");
+ b = list_first_entry(&boottime_list,
+ struct boottime_list, list);
+ memset(&b_zero, 0, sizeof(struct boottime_list));
+ boottime_debugfs_load(s, b, &b_zero);
+
+ if (time_bootloader_done)
+ seq_printf(s,
+ "bootloader: cpu0 system: %d%% idle: %d%% iowait: 0%% irq: 0%%\n",
+ 100 - boottime_bootloader_idle(),
+ boottime_bootloader_idle());
+ return 0;
+}
+
+static int boottime_debugfs_bootgraph_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file,
+ boottime_debugfs_bootgraph_show,
+ inode->i_private);
+}
+
+static int boottime_debugfs_summary_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file,
+ boottime_debugfs_summary_show,
+ inode->i_private);
+}
+
+static const struct file_operations boottime_debugfs_bootgraph_operations = {
+ .open = boottime_debugfs_bootgraph_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations boottime_debugfs_summary_operations = {
+ .open = boottime_debugfs_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void boottime_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("boottime", NULL);
+
+ (void) debugfs_create_file("bootgraph", S_IFREG | S_IRUGO,
+ dir, (void *)BOOTTIME_FILTER_NOTHING,
+ &boottime_debugfs_bootgraph_operations);
+ (void) debugfs_create_file("bootgraph_all_except0", S_IFREG | S_IRUGO,
+ dir, (void *)BOOTTIME_FILTER_OUT_ZERO,
+ &boottime_debugfs_bootgraph_operations);
+ (void) debugfs_create_file("bootgraph_larger100",
+ S_IFREG | S_IRUGO,
+ dir, (void *)BOOTTIME_FILTER_OUT_LESS_100,
+ &boottime_debugfs_bootgraph_operations);
+ (void) debugfs_create_file("summary", S_IFREG | S_IRUGO,
+ dir, NULL,
+ &boottime_debugfs_summary_operations);
+}
+#else
+#define boottime_debugfs_init() do { } while (0)
+#endif
+
+static ssize_t show_bootloader(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%ld\n", time_bootloader_done / 1000);
+}
+
+static ssize_t show_kernel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%ld\n",
+ (time_kernel_done - time_bootloader_done) / 1000);
+}
+
+DEVICE_ATTR(kernel, 0444, show_kernel, NULL);
+DEVICE_ATTR(bootloader, 0444, show_bootloader, NULL);
+
+static struct attribute *boottime_sysfs_entries[] = {
+ &dev_attr_kernel.attr,
+ &dev_attr_bootloader.attr,
+ NULL
+};
+
+static struct attribute_group boottime_attr_grp = {
+ .name = NULL,
+ .attrs = boottime_sysfs_entries,
+};
+
+static int __init boottime_init(void)
+{
+ struct kobject *boottime_kobj;
+
+ boottime_kobj = kobject_create_and_add("boottime", NULL);
+ if (!boottime_kobj) {
+ printk(KERN_ERR "boottime: out of memory!\n");
+ return -ENOMEM;
+ }
+
+ if (sysfs_create_group(boottime_kobj, &boottime_attr_grp) < 0) {
+ kobject_put(boottime_kobj);
+ printk(KERN_ERR "boottime: Failed creating sysfs group\n");
+ return -ENOMEM;
+ }
+
+ boottime_debugfs_init();
+
+ return 0;
+}
+
+late_initcall(boottime_init);
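For reference, a platform feeds this driver by handing boottime_activate() a struct boottime_timer. The sketch below is hypothetical platform glue, not the actual ux500 code: the .get_time field name follows its use in this file, get_time() is assumed to return microseconds since power-on as an unsigned long, and board_read_us() is a made-up counter read.

/* Hypothetical platform glue for the boottime driver (illustration only). */
#include <linux/init.h>
#include <linux/boottime.h>

static unsigned long board_boottime_get_time(void)
{
	/* board_read_us() is hypothetical: microseconds since power-on */
	return board_read_us();
}

static struct boottime_timer __initdata board_boottime_timer = {
	.get_time = board_boottime_get_time,
};

static void __init board_boottime_setup(void)
{
	boottime_activate(&board_boottime_timer);
	boottime_mark("board_init+0x0/0x0");
}

The "+0x0/0x0" suffix mirrors the marker names used elsewhere in this patch ("mount+0x0/0x0", "execute_init+0x0/0x0"), which keeps the debugfs bootgraph output in the format the bootgraph script expects.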
diff --git a/init/main.c b/init/main.c
index 217ed23e948..a59775f4557 100644
--- a/init/main.c
+++ b/init/main.c
@@ -68,6 +68,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/boottime.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -682,6 +683,8 @@ int __init_or_module do_one_initcall(initcall_t fn)
int count = preempt_count();
int ret;
+ boottime_mark_symbolic(fn);
+
if (initcall_debug)
ret = do_one_initcall_debug(fn);
else
@@ -758,6 +761,7 @@ static noinline int init_post(void)
{
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
+ boottime_deactivate();
free_initmem();
mark_rodata_ro();
system_state = SYSTEM_RUNNING;
@@ -813,6 +817,7 @@ static int __init kernel_init(void * unused)
do_pre_smp_initcalls();
lockup_detector_init();
+ boottime_system_up();
smp_init();
sched_init_smp();
@@ -835,6 +840,7 @@ static int __init kernel_init(void * unused)
if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
ramdisk_execute_command = NULL;
+ boottime_mark("mount+0x0/0x0");
prepare_namespace();
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f7c543a801d..7b35e039a26 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -262,7 +262,7 @@ void handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
- irqreturn_t action_ret;
+ irqreturn_t action_ret = IRQ_NONE;
might_sleep();
@@ -277,7 +277,11 @@ void handle_nested_irq(unsigned int irq)
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock);
- action_ret = action->thread_fn(action->irq, action->dev_id);
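+ /* Walk the whole action chain so a shared nested interrupt runs every handler */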
+ do {
+ action_ret |= action->thread_fn(action->irq, action->dev_id);
+ action = action->next;
+ } while (action);
+
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index dc7bc082928..feec0e3689c 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -50,6 +50,8 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+ATOMIC_NOTIFIER_HEAD(crash_percpu_notifier_list);
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1082,6 +1084,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
void crash_kexec(struct pt_regs *regs)
{
+ struct pt_regs fixed_regs;
/* Take the kexec_mutex here to prevent sys_kexec_load
* running on one cpu from replacing the crash kernel
* we are using after a panic on a different cpu.
@@ -1092,7 +1095,6 @@ void crash_kexec(struct pt_regs *regs)
*/
if (mutex_trylock(&kexec_mutex)) {
if (kexec_crash_image) {
- struct pt_regs fixed_regs;
kmsg_dump(KMSG_DUMP_KEXEC);
@@ -1101,6 +1103,14 @@ void crash_kexec(struct pt_regs *regs)
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
+#ifdef CONFIG_CRASH_SWRESET
+ else {
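+		/* No crash kernel loaded: save state, shut down and do a software reset */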
+ crash_setup_regs(&fixed_regs, regs);
+ crash_save_vmcoreinfo();
+ machine_crash_shutdown(&fixed_regs);
+ machine_crash_swreset();
+ }
+#endif
mutex_unlock(&kexec_mutex);
}
}
diff --git a/kernel/printk.c b/kernel/printk.c
index 7982a0a841e..2cb19092743 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -41,6 +41,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/rculist.h>
+#include <trace/stm.h>
#include <asm/uaccess.h>
@@ -53,6 +54,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#ifdef CONFIG_PRINTK_LL
+extern void printascii(char *);
+#endif
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -876,6 +881,10 @@ asmlinkage int vprintk(const char *fmt, va_list args)
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
+#ifdef CONFIG_PRINTK_LL
+ printascii(printk_buf);
+#endif
+
p = printk_buf;
/* Read log level and handle special printk prefix */
@@ -897,6 +906,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
}
}
+ /* Send printk buffer to MIPI STM trace hardware too if enabled */
+ stm_dup_printk(printk_buf, printed_len);
+
/*
* Copy the output into log_buf. If the caller didn't provide
* the appropriate log prefix, we insert them here
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275bb60..8928afea2f4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -37,6 +37,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/fs.h>
+#include <trace/stm.h>
#include "trace.h"
#include "trace_output.h"
@@ -909,7 +910,7 @@ void tracing_reset_current_online_cpus(void)
tracing_reset_online_cpus(&global_trace);
}
-#define SAVED_CMDLINES 128
+#define SAVED_CMDLINES 2048
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -1237,6 +1238,8 @@ trace_function(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ stm_ftrace(ip, parent_ip);
}
void
@@ -1331,6 +1334,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+ stm_stack_trace(trace.entries);
+
out:
/* Again, don't let gcc optimize things here */
barrier();
@@ -1501,6 +1506,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
ftrace_trace_stack(buffer, flags, 6, pc);
}
+ stm_trace_bprintk_buf(ip, fmt, trace_buf, sizeof(u32) * len);
+
out_unlock:
arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
@@ -1577,6 +1584,8 @@ int trace_array_vprintk(struct trace_array *tr,
ftrace_trace_stack(buffer, irq_flags, 6, pc);
}
+ stm_trace_printk_buf(ip, trace_buf, len);
+
out_unlock:
arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c212a7f934e..773cb84adc8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <asm/setup.h>
+#include <trace/stm.h>
#include "trace_output.h"
@@ -1703,6 +1704,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ stm_ftrace(ip, parent_ip);
+
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace();
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a1845..a136fd86533 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>
+#include <trace/stm.h>
#include "trace.h"
@@ -47,6 +48,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, flags, pc);
+
+ stm_sched_switch(entry->prev_pid, entry->prev_prio, entry->prev_state,
+ entry->next_pid, entry->next_prio, entry->next_state,
+ entry->next_cpu);
}
static void
@@ -103,6 +108,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ stm_sched_wakeup(entry->prev_pid, entry->prev_prio, entry->prev_state,
+ entry->next_pid, entry->next_prio, entry->next_state,
+ entry->next_cpu);
+
ftrace_trace_stack(tr->buffer, flags, 6, pc);
ftrace_trace_userstack(tr->buffer, flags, pc);
}
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 4d403844e13..06c33adfe7f 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -10,23 +10,37 @@
#
usage() {
- echo "Usage: $0 [--save-scmversion] [srctree]" >&2
+ echo "Usage: $0 [--save-scmversion] [-s srctree] [-t ref_tag]" >&2
exit 1
}
scm_only=false
srctree=.
-if test "$1" = "--save-scmversion"; then
- scm_only=true
- shift
-fi
-if test $# -gt 0; then
- srctree=$1
+match_option=--exact-match
+
+while [ $# -ne 0 ]; do
+ if test "$1" = "--save-scmversion"; then
+ scm_only=true
+ elif test "$1" = "-s"; then
+ shift
+ if test $# -ne 0 -a -d "$1"; then
+ srctree=$1
+ else
+ usage
+ fi
+ elif test "$1" = "-t"; then
+ shift
+ if [ $# -ne 0 ]; then
+ match=" --tags --match "$1
+ rev_refs="--refs refs/tags/"$1
+ else
+ usage
+ fi
+ else
+ usage
+ fi
shift
-fi
-if test $# -gt 0 -o ! -d "$srctree"; then
- usage
-fi
+done
scm_version()
{
@@ -47,8 +61,8 @@ scm_version()
# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
# it, because this version is defined in the top level Makefile.
- if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
-
+ if git name-rev --tags $rev_refs HEAD | \
+ grep -E '^HEAD[[:space:]]+(.*~[0-9]*|undefined)$' > /dev/null; then
# If only the short version is requested, don't bother
# running further git commands
if $short; then
@@ -57,7 +71,7 @@ scm_version()
fi
# If we are past a tagged commit (like
# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
- if atag="`git describe 2>/dev/null`"; then
+ if atag="`git describe $match 2>/dev/null`"; then
echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
# If we don't have a tag at all we print -g{commitish}.