author    Ken Liu <ken.liu@arm.com>    2018-03-08 09:49:32 +0800
committer Arvind Chauhan <arvind.chauhan@arm.com>    2018-05-15 12:27:15 +0100
commit    4e0e919dea3fb8d1191dabad96386b5f04a61b32 (patch)
tree      785518b01243d7c13fcacab8ad25f0097e92729b
parent    85228f1fb96655f9de76759fe14a85ae11f3842b (diff)
drivers: video: add mali v550 driver (juno-tzmp1)
The V550 driver is added to support the hardware. Protected memory is reserved in this commit because the V550 supports protected mode.

Source package downloaded from: https://developer.arm.com/products/software/mali-drivers/video-drivers
Filename: VD304K03A-SW-98002-r5p1-00rel0.tgz

Signed-off-by: Ken Liu <ken.liu@arm.com>
-rw-r--r--  arch/arm64/boot/dts/arm/juno-base.dtsi | 28
-rw-r--r--  arch/arm64/boot/dts/arm/juno_dp650_mv500_smmu_bifrost.dts | 1
-rw-r--r--  drivers/video/Kconfig | 3
-rw-r--r--  drivers/video/Makefile | 2
-rw-r--r--  drivers/video/arm/v5xx/Kconfig | 66
-rw-r--r--  drivers/video/arm/v5xx/Makefile | 35
-rw-r--r--  drivers/video/arm/v5xx/base/Makefile | 68
-rw-r--r--  drivers/video/arm/v5xx/base/docs/Doxyfile | 122
-rw-r--r--  drivers/video/arm/v5xx/base/mve_base.h | 425
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer.h | 192
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_ashmem.c | 176
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_ashmem.h | 65
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_ashmem_stub.c | 38
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_attachment.c | 67
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_attachment.h | 55
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_common.c | 263
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_dmabuf.c | 230
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_dmabuf.h | 65
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_dmabuf_stub.c | 38
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_valloc.c | 170
-rw-r--r--  drivers/video/arm/v5xx/base/mve_buffer_valloc.h | 65
-rw-r--r--  drivers/video/arm/v5xx/base/mve_com.c | 230
-rw-r--r--  drivers/video/arm/v5xx/base/mve_com.h | 313
-rw-r--r--  drivers/video/arm/v5xx/base/mve_com_host_interface_v1.c | 821
-rw-r--r--  drivers/video/arm/v5xx/base/mve_com_host_interface_v1.h | 20
-rw-r--r--  drivers/video/arm/v5xx/base/mve_com_host_interface_v2.c | 1549
-rw-r--r--  drivers/video/arm/v5xx/base/mve_com_host_interface_v2.h | 25
-rw-r--r--  drivers/video/arm/v5xx/base/mve_command.c | 564
-rw-r--r--  drivers/video/arm/v5xx/base/mve_command.h | 50
-rw-r--r--  drivers/video/arm/v5xx/base/mve_driver.c | 288
-rw-r--r--  drivers/video/arm/v5xx/base/mve_driver.h | 31
-rw-r--r--  drivers/video/arm/v5xx/base/mve_fw.c | 1011
-rw-r--r--  drivers/video/arm/v5xx/base/mve_fw.h | 116
-rw-r--r--  drivers/video/arm/v5xx/base/mve_ioctl.h | 26
-rw-r--r--  drivers/video/arm/v5xx/base/mve_mem_region.c | 134
-rw-r--r--  drivers/video/arm/v5xx/base/mve_mem_region.h | 86
-rw-r--r--  drivers/video/arm/v5xx/base/mve_mmu.c | 1260
-rw-r--r--  drivers/video/arm/v5xx/base/mve_mmu.h | 383
-rw-r--r--  drivers/video/arm/v5xx/base/mve_queue.c | 232
-rw-r--r--  drivers/video/arm/v5xx/base/mve_queue.h | 112
-rw-r--r--  drivers/video/arm/v5xx/base/mve_session.c | 3738
-rw-r--r--  drivers/video/arm/v5xx/base/mve_session.h | 426
-rw-r--r--  drivers/video/arm/v5xx/base/mve_session_buffer.c | 992
-rw-r--r--  drivers/video/arm/v5xx/base/mve_session_buffer.h | 95
-rw-r--r--  drivers/video/arm/v5xx/base/sconscript | 46
-rw-r--r--  drivers/video/arm/v5xx/external/host_interface_v1/mve_coresched_reg.h | 170
-rw-r--r--  drivers/video/arm/v5xx/external/host_interface_v1/mve_protocol_kernel.h | 478
-rw-r--r--  drivers/video/arm/v5xx/external/host_interface_v2/mve_protocol_def.h | 1574
-rw-r--r--  drivers/video/arm/v5xx/resource/Makefile | 75
-rw-r--r--  drivers/video/arm/v5xx/resource/docs/Doxyfile | 122
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/board-vexpress.c | 433
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/mve_config.c | 36
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/mve_config.h | 86
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/mve_dvfs.h | 70
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/mve_port_attributes.h | 32
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/mve_power_management.h | 47
-rw-r--r--  drivers/video/arm/v5xx/resource/machine/vexpress-regs.h | 36
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.c | 125
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.h | 95
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_driver.c | 324
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_driver.h | 75
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.c | 832
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.h | 85
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_irq.c | 230
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_irq.h | 73
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_log.c | 1549
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_log.h | 299
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_log_ram.h | 185
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.c | 292
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.h | 134
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.c | 132
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.h | 47
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.c | 130
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.h | 92
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.c | 122
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.h | 66
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.c | 670
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.h | 217
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_pm.c | 275
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_pm.h | 121
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_register.c | 95
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_register.h | 96
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.c | 1548
-rw-r--r--  drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.h | 169
-rw-r--r--  drivers/video/arm/v5xx/resource/sconscript | 48
-rw-r--r--  drivers/video/arm/v5xx/sconscript | 111
-rw-r--r--  linaro/configs/mali.conf | 3
87 files changed, 26117 insertions, 4 deletions
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 402300fd37c7..4117de5a4830 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -332,6 +332,25 @@
reg = <0x00000000 0xfee00000 0 0x00200000>;
no-map;
};
+
+ /* Mve secure firmware run-time memory */
+ mve_fw_carveout: mve_fw_carveout@fc000000 {
+ compatible = "ion,mve_private_heap";
+ reg = <0x0 0xfc000000 0x0 0x02000000>;
+ };
+
+ /* Mve decoder decrypted input buffer */
+ mve_protected_carveout_input: mve_protected_carveout_input@f6000000 {
+ compatible = "ion,mve_protected_heap";
+ reg = <0x0 0xf6000000 0x0 0x06000000>;
+ };
+
+ /* Video decoded output buffer. */
+ mve_protected_carveout_output: mve_protected_carveout_output@e8000000 {
+ compatible = "ion,multimedia_protected_heap";
+ reg = <0x0 0xe8000000 0x0 0x0e000000>;
+ };
+
};
soc_i2s: i2s@7ff90000 {
@@ -419,7 +438,8 @@
<2 0 0 0x18000000 0x04000000>,
<3 0 0 0x1c000000 0x04000000>,
<4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
+ <5 0 0 0x10000000 0x04000000>,
+ <6 0 0 0x6F000000 0x04000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 15>;
@@ -438,6 +458,12 @@
<0 0 12 &gic 0 0 0 169 IRQ_TYPE_LEVEL_HIGH>;
/include/ "juno-motherboard.dtsi"
+
+ mve@6,00030000 {
+ compatible = "arm,mali-v500";
+ reg = <6 0x30000 0x10000>;
+ interrupts = <11>;
+ };
};
site2: tlx@60000000 {
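
The reserved-memory carveouts and the mve@6,00030000 node above are consumed through standard OF matching on the "arm,mali-v500" compatible string. As a rough sketch only (the real binding lives in mve_rsrc_driver.c, which this patch adds but does not appear in this excerpt; all names below are illustrative), the probe side could look like this:

#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id mve_of_match[] =
{
    { .compatible = "arm,mali-v500" },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mve_of_match);

static int mve_probe(struct platform_device *pdev)
{
    struct resource *res;
    int irq;

    /* reg = <6 0x30000 0x10000> resolves to a MEM resource via the
     * motherboard's ranges property */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (NULL == res)
    {
        return -ENODEV;
    }

    /* interrupts = <11> resolves through the interrupt-map */
    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
    {
        return irq;
    }

    return 0;
}

static struct platform_driver mve_platform_driver =
{
    .probe = mve_probe,
    .driver =
    {
        .name = "mali-v5xx",
        .of_match_table = mve_of_match,
    },
};
module_platform_driver(mve_platform_driver);
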
diff --git a/arch/arm64/boot/dts/arm/juno_dp650_mv500_smmu_bifrost.dts b/arch/arm64/boot/dts/arm/juno_dp650_mv500_smmu_bifrost.dts
index a5787cdbef30..bdefec5a72db 100644
--- a/arch/arm64/boot/dts/arm/juno_dp650_mv500_smmu_bifrost.dts
+++ b/arch/arm64/boot/dts/arm/juno_dp650_mv500_smmu_bifrost.dts
@@ -115,5 +115,4 @@
mmu-masters = <&dp0 0x000 0x400 0x800 0xc00 0x1000>;
};
-
};
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 005e638acc72..b263a68334de 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -31,8 +31,9 @@ endmenu
source "drivers/video/backlight/Kconfig"
source "drivers/video/adf/Kconfig"
-source "drivers/video/adf/arm/Kconfig"
source "drivers/video/video-tx/Kconfig"
+source "drivers/video/adf/arm/Kconfig"
+source "drivers/video/arm/v5xx/Kconfig"
config VGASTATE
tristate
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index af9e05e98c0f..c80ba6bece95 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -8,7 +8,7 @@ obj-y += backlight/
obj-$(CONFIG_ADF) += adf/
obj-$(CONFIG_VIDEO_TX) += video-tx/
-
+obj-$(CONFIG_MALI_VPU) += arm/v5xx/
obj-y += fbdev/
obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
diff --git a/drivers/video/arm/v5xx/Kconfig b/drivers/video/arm/v5xx/Kconfig
new file mode 100644
index 000000000000..3e7ad63dfc6e
--- /dev/null
+++ b/drivers/video/arm/v5xx/Kconfig
@@ -0,0 +1,66 @@
+menuconfig MALI_VPU
+ tristate "Mali VPU driver"
+ ---help---
+ This enables the Mali VPU driver
+
+config MALI_VPU_DEBUG
+ depends on MALI_VPU
+ bool "Mali VPU driver debug"
+ default n
+ ---help---
+	  This enables debug output for the Mali VPU driver
+
+config MALI_VPU_DEVICE_TREE
+ depends on MALI_VPU
+ bool "Enable driver support for device trees"
+ default n
+ ---help---
+ Enable support for device trees in the Mali VPU driver.
+
+config MALI_VPU_ENABLE_ALOG
+ depends on MALI_VPU
+ bool "Send kernel space logs to Android logd socket."
+ default y
+ ---help---
+ Send kernel space logs to Android logd socket. This option should only be enabled for Android Lollipop or later revisions.
+
+config MALI_VPU_ENABLE_FTRACE
+ depends on MALI_VPU
+ bool "Send kernel space logs to ftrace."
+ default n
+ ---help---
+ Send kernel space logs to ftrace.
+
+config MALI_VPU_ENABLE_PRINT_FILE
+ depends on MALI_VPU
+ bool "Append file and line number to kernel space log messages."
+ default y
+ ---help---
+ Append file and line number to kernel space log messages.
+
+choice
+ depends on MALI_VPU
+ prompt "Choose power saving mode"
+config MALI_VPU_POWER_SAVING_MODE_DVFS
+ bool "Use DVFS"
+config MALI_VPU_POWER_SAVING_MODE_CLOCK_GATING
+ bool "Use HW clock gating"
+endchoice
+
+choice
+ depends on MALI_VPU
+ prompt "Choose hardware platform"
+config MALI_VPU_VEX6
+ bool "Versatile Express 6"
+config MALI_VPU_VEX7
+ bool "Versatile Express 7"
+config MALI_VPU_JUNO
+ bool "Juno"
+endchoice
+
+config MALI_VPU_DISABLE_WATCHDOG
+ depends on MALI_VPU
+ bool "Disable the watchdog"
+ default n
+ ---help---
+ Disable the watchdog that detects frozen video sessions.
diff --git a/drivers/video/arm/v5xx/Makefile b/drivers/video/arm/v5xx/Makefile
new file mode 100644
index 000000000000..016e0a8057de
--- /dev/null
+++ b/drivers/video/arm/v5xx/Makefile
@@ -0,0 +1,35 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+ifneq ($(KERNELRELEASE),)
+
+obj-$(CONFIG_MALI_VPU) := base/ resource/
+
+else
+
+CROSS_COMPILE ?= arm-eabi-
+DEBUG ?= 0
+
+all: android
+
+android:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) OS=android DEBUG=$(DEBUG) HW=1
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) HW=1 clean
+
+.PHONY: all android
+
+endif
diff --git a/drivers/video/arm/v5xx/base/Makefile b/drivers/video/arm/v5xx/base/Makefile
new file mode 100644
index 000000000000..8963557a165f
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/Makefile
@@ -0,0 +1,68 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+obj-$(CONFIG_MALI_VPU) := mve_base.o
+
+mve_base-y := mve_driver.o \
+ mve_command.o \
+ mve_session.o \
+ mve_session_buffer.o \
+ mve_mmu.o \
+ mve_mem_region.o \
+ mve_fw.o \
+ mve_com.o \
+ mve_queue.o \
+ mve_com_host_interface_v1.o \
+ mve_com_host_interface_v2.o \
+ mve_buffer_common.o \
+ mve_buffer_valloc.o \
+ mve_buffer_attachment.o \
+ mve_buffer_dmabuf.o \
+ mve_buffer_ashmem.o
+
+ccflags-y += -I$(src)/../external/ -I$(src)/../external/host_interface_v1/ -I$(src)/../external/khronos/original/OMXIL/1.2.0 -I$(src)/../resource
+
+ccflags-$(CONFIG_MALI_VPU_ENABLE_PRINT_SESSION) += -DDEBUG_PRINT_SESSION
+ccflags-$(CONFIG_MALI_VPU_ENABLE_PRINT_MESSAGES) += -DDEBUG_PRINT_MESSAGES
+ccflags-$(CONFIG_MALI_VPU_ENABLE_PRINT_FILE) += -DMVE_LOG_PRINT_FILE_ENABLE
+
+ccflags-$(CONFIG_MALI_VPU_DISABLE_WATCHDOG) += -DDISABLE_WATCHDOG
+# If debug is enabled, disable function inlining to allow ftrace to give a more detailed picture of the executed functions
+ccflags-$(CONFIG_MALI_VPU_DEBUG) += -D_DEBUG -fno-inline
+ccflags-$(CONFIG_MALI_VPU_UNIT) += -DUNIT
+
+ifeq ($(CONFIG_MALI_VPU_TRACKMEM), y)
+ ccflags-y += -DMVE_MEM_DBG_TRACKMEM=1
+ MVE_MEM_DEBUG=y
+else
+ ccflags-y += -DMVE_MEM_DBG_TRACKMEM=0
+endif
+
+ifeq ($(CONFIG_MALI_VPU_RESFAIL), y)
+ ccflags-y += -DMVE_MEM_DBG_RESFAIL=1
+ MVE_MEM_DEBUG=y
+else
+ ccflags-y += -DMVE_MEM_DBG_RESFAIL=0
+endif
+
+ifeq ($(MVE_MEM_DEBUG), y)
+ ccflags-y += -DMVE_MEM_DBG_SUPPORT=1
+else
+ ccflags-y += -DMVE_MEM_DBG_SUPPORT=0
+endif
+
+# Always switch out on idleness
+ccflags-y += -DSCHEDULER_MODE_IDLE_SWITCHOUT=1
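
Note that the Makefile always defines the memory-debug macros, as either 0 or 1, so driver code can gate on them with a plain #if rather than #ifdef. A minimal sketch of the consumer side, with a hypothetical function name (the real bookkeeping presumably lives in mve_rsrc_mem_frontend.c, which is not part of this excerpt):

#if MVE_MEM_DBG_SUPPORT
/* Hypothetical: record allocations for leak tracking / fault injection */
void mve_dbg_track_alloc(size_t size);
#else
static inline void mve_dbg_track_alloc(size_t size) {}
#endif
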
diff --git a/drivers/video/arm/v5xx/base/docs/Doxyfile b/drivers/video/arm/v5xx/base/docs/Doxyfile
new file mode 100644
index 000000000000..4d88e0d449f7
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/docs/Doxyfile
@@ -0,0 +1,122 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+##############################################################################
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections etc, use the module
+# name as a prefix e.g. gles_my_alias.
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT += ../../kernel/drivers/video/arm/v5xx/base
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE += ../../omx_components/prototype_decoder
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH += ../..
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS += .
diff --git a/drivers/video/arm/v5xx/base/mve_base.h b/drivers/video/arm/v5xx/base/mve_base.h
new file mode 100644
index 000000000000..a1394ca77385
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_base.h
@@ -0,0 +1,425 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_BASE_H
+#define MVE_BASE_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+#define IOCTL_MAGIC 251
+#define MVE_BASE_COMMAND _IOWR(IOCTL_MAGIC, 0, struct mve_base_command_header)
+
+#define MVE_BASE_FLAGS_DMABUF_DISABLE_CACHE_MAINTENANCE 0x80000000
+
+/**
+ * Commands from user- to kernel space.
+ */
+enum mve_base_command_type
+{
+ MVE_BASE_CREATE_SESSION,
+ MVE_BASE_DESTROY_SESSION,
+ MVE_BASE_ACTIVATE_SESSION,
+
+ MVE_BASE_ENQUEUE_FLUSH_BUFFERS,
+ MVE_BASE_ENQUEUE_STATE_CHANGE,
+
+ MVE_BASE_GET_EVENT,
+ MVE_BASE_SET_PARAMETER,
+ MVE_BASE_GET_PARAMETER,
+ MVE_BASE_SET_CONFIG,
+ MVE_BASE_GET_CONFIG,
+
+ MVE_BASE_REGISTER_BUFFER,
+ MVE_BASE_UNREGISTER_BUFFER,
+ MVE_BASE_FILL_THIS_BUFFER,
+ MVE_BASE_EMPTY_THIS_BUFFER,
+
+ MVE_BASE_NOTIFY_REF_FRAME_RELEASE,
+
+ MVE_BASE_REQUEST_MAX_FREQUENCY,
+
+ MVE_BASE_READ_HW_INFO,
+
+ MVE_BASE_RPC_MEM_ALLOC,
+ MVE_BASE_RPC_MEM_RESIZE,
+
+ MVE_BASE_DEBUG_READ_REGISTER,
+ MVE_BASE_DEBUG_WRITE_REGISTER,
+ MVE_BASE_DEBUG_INTERRUPT_COUNT,
+ MVE_BASE_DEBUG_SEND_COMMAND,
+ MVE_BASE_DEBUG_FIRMWARE_HUNG_SIMULATION,
+};
+
+/**
+ * External buffer identifier. The data stored by this type is specific to each
+ * buffer API implementation. The mve_buffer_valloc implementation uses this
+ * type to store addresses to user space allocated memory (virtual addresses). A buffer
+ * API implementation for Android will use this type to store gralloc handles. */
+typedef uint64_t mve_base_buffer_handle_t;
+
+/**
+ * @brief Represents a command sent to the driver for processing.
+ */
+struct mve_base_command_header
+{
+ uint32_t cmd; /**< Which command to execute */
+ uint32_t size; /**< Size of the data section (excluding header size) */
+ uint8_t data[0]; /**< First byte of the data section. */
+};
+
+/**
+ * @brief Region of interest structure.
+ *
+ * The region is macroblock positions (x,y) in the range
+ * mbx_left <= x < mbx_right
+ * mby_top <= y < mby_bottom
+ */
+struct mve_base_roi_region
+{
+ uint16_t mbx_left; /**< X coordinate of left macro block */
+ uint16_t mbx_right; /**< X coordinate of right macro block */
+ uint16_t mby_top; /**< Y coordinate of top macro block */
+ uint16_t mby_bottom; /**< Y coordinate of bottom macro block */
+ int16_t qp_delta; /**< Delta relative the default QP value */
+};
+
+#define MVE_BASE_ROI_REGIONS_MAX 16
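
The bounds documented above are half-open: a region covers macroblock columns mbx_left <= x < mbx_right and rows mby_top <= y < mby_bottom. For illustration, a region that lowers QP by 2 over the top-left 4x2 block of macroblocks would be initialized like this:

/* Illustrative only: a 4x2-macroblock ROI at the top-left corner,
 * using the half-open bounds documented above. */
struct mve_base_roi_region roi =
{
    .mbx_left   = 0,
    .mbx_right  = 4,   /* covers macroblock columns 0..3 */
    .mby_top    = 0,
    .mby_bottom = 2,   /* covers macroblock rows 0..1 */
    .qp_delta   = -2,  /* negative delta: higher quality in this region */
};
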
+
+/**
+ * @brief Instances of this structure are sent along with fill/empty this buffer messages.
+ */
+struct mve_base_buffer_details
+{
+ mve_base_buffer_handle_t buffer_id; /**< Buffer unique ID */
+ mve_base_buffer_handle_t handle; /**< Handle to buffer */
+ uint32_t filled_len; /**< Size of the contents in the buffer in bytes */
+ uint32_t flags; /**< OMX buffer flags */
+ uint32_t mve_flags; /**< MVE buffer flags */
+ uint32_t crc_offset; /**< Offset of the CRC data in the buffer. Only valid
+ * for CRC buffers using the attachment allocator. */
+ uint64_t timestamp; /**< Buffer timestamp */
+ uint8_t nRegions; /**< Number of ROI regions */
+ uint8_t reserved[3]; /**< Unused but required for alignment reasons */
+ struct mve_base_roi_region regions[MVE_BASE_ROI_REGIONS_MAX]; /**< ROI data */
+};
+
+/**
+ * Events from kernel- to user space.
+ */
+enum mve_base_event_code
+{
+ MVE_BASE_EVENT_RPC_PRINT = 0, /**< Data contains a NULL terminated string */
+ MVE_BASE_EVENT_SWITCHED_IN = 1,
+ MVE_BASE_EVENT_SWITCHED_OUT = 2,
+ MVE_BASE_EVENT_PONG = 3,
+ MVE_BASE_EVENT_STATE_CHANGED = 4,
+ MVE_BASE_EVENT_ERROR = 5,
+ MVE_BASE_EVENT_GENERIC = 6,
+ MVE_BASE_EVENT_PROCESSED = 7,
+ MVE_BASE_EVENT_INPUT = 8,
+ MVE_BASE_EVENT_OUTPUT = 9,
+ MVE_BASE_EVENT_GET_PARAMCONFIG = 10,
+ MVE_BASE_EVENT_SET_PARAMCONFIG = 11,
+ MVE_BASE_EVENT_INPUT_FLUSHED = 12,
+ MVE_BASE_EVENT_OUTPUT_FLUSHED = 13,
+ MVE_BASE_EVENT_CODE_DUMP = 14,
+ MVE_BASE_EVENT_SESSION_HUNG = 15,
+ MVE_BASE_EVENT_ALLOC_PARAMS = 16,
+ MVE_BASE_EVENT_SEQUENCE_PARAMS = 17,
+ MVE_BASE_EVENT_BUFFER_PARAM = 18,
+ MVE_BASE_EVENT_REF_FRAME_RELEASED = 19,
+ MVE_BASE_EVENT_RPC_MEM_ALLOC = 20,
+ MVE_BASE_EVENT_RPC_MEM_RESIZE = 21,
+
+ /* These messages are not forwarded to userspace and must therefore
+ * be placed last in this enum. */
+ MVE_BASE_EVENT_JOB_DEQUEUED = 22,
+ MVE_BASE_EVENT_IDLE = 23,
+ MVE_BASE_EVENT_FW_TRACE_BUFFERS = 24,
+};
+
+/**
+ * The event header of an event. Contains the event code and
+ * size of the data attached to the event.
+ */
+struct mve_base_event_header
+{
+ uint16_t code; /**< Event code */
+ uint16_t size; /**< Size of the data attached to the event in bytes */
+ uint8_t data[0]; /**< First byte of the data attached to the event */
+};
+
+/**
+ * Error codes for MVE responses.
+ */
+typedef enum
+{
+ MVE_BASE_ERROR_NONE,
+ MVE_BASE_ERROR_UNDEFINED,
+ MVE_BASE_ERROR_BAD_PARAMETER,
+ MVE_BASE_ERROR_BAD_PORT_INDEX,
+ MVE_BASE_ERROR_FIRMWARE,
+ MVE_BASE_ERROR_HARDWARE,
+ MVE_BASE_ERROR_INSUFFICIENT_RESOURCES,
+ MVE_BASE_ERROR_NOT_IMPLEMENTED,
+ MVE_BASE_ERROR_NOT_READY,
+ MVE_BASE_ERROR_TIMEOUT,
+ MVE_BASE_ERROR_VERSION_MISMATCH
+} mve_base_error;
+
+/**
+ * @brief Represents the result of an executed command.
+ */
+struct mve_base_response_header
+{
+ uint32_t error; /**< MVE error code */
+ uint32_t firmware_error; /**< Firmware error code */
+ uint32_t size; /**< Size of the data section (excluding header size) */
+ uint8_t data[0]; /**< First byte of the data section */
+};
+
+/**
+ * This enum lists the different formats of supplied buffers.
+ */
+enum mve_base_omx_buffer_format
+{
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR = 0x7f000100, /**< Planar YUV buffer (3 planes) */
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR = 0x7f000101, /**< Semiplanar YUV (2 planes) */
+ MVE_BASE_OMX_BUFFER_FORMAT_YUYYVY_10B = 0x7f000102, /**< ARM 10-bit YUV 420 format */
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC = 0x7f000103, /**< YUV buffer compressed with AFBC */
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B = 0x7f000104, /**< 10-bit YUV buffer compressed with AFBC */
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV422_1P = 0x7f000105, /**< YUV 422 buffer (1 plane, YUY2) */
+ MVE_BASE_OMX_BUFFER_FORMAT_YVU422_1P = 0x7f000106, /**< YVU 422 buffer (1 plane, UYVY) */
+ MVE_BASE_OMX_BUFFER_FORMAT_BITSTREAM = 0x7f000107, /**< Compressed bitstream data */
+ MVE_BASE_OMX_BUFFER_FORMAT_CRC = 0x7f000108, /**< CRC buffer */
+ MVE_BASE_OMX_BUFFER_FORMAT_YV12 = 0x7f000109, /**< Planar YV12 buffer (3 planes) */
+ MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR = 0x7f00010a, /**< Semiplanar YVU (2 planes) */
+ MVE_BASE_OMX_BUFFER_FORMAT_RGBA_8888 = 0x7f00010b, /**< RGB format with 32 bit as Red 31:24, Green 23:16, Blue 15:8, Alpha 7:0 */
+ MVE_BASE_OMX_BUFFER_FORMAT_BGRA_8888 = 0x7f00010c, /**< RGB format with 32 bit as Blue 31:24, Green 23:16, Red 15:8, Alpha 7:0 */
+ MVE_BASE_OMX_BUFFER_FORMAT_ARGB_8888 = 0x7f00010d, /**< RGB format with 32 bit as Alpha 31:24, Red 23:16, Green 15:8, Blue 7:0 */
+ MVE_BASE_OMX_BUFFER_FORMAT_ABGR_8888 = 0x7f00010e, /**< RGB format with 32 bit as Alpha 31:24, Blue 23:16, Green 15:8, Red 7:0 */
+};
+
+/**
+ * Allocator used to allocate a user-space allocated buffer.
+ */
+enum mve_base_buffer_allocator
+{
+ MVE_BASE_BUFFER_ALLOCATOR_VMALLOC, /**< Memory allocated by valloc or malloc. */
+ MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT, /**< Represents a buffer that is part of another buffer. */
+ MVE_BASE_BUFFER_ALLOCATOR_DMABUF, /**< Memory wrapped by dma_buf. */
+ MVE_BASE_BUFFER_ALLOCATOR_ASHMEM, /**< Memory wrapped by ashmem. */
+};
+
+/**
+ * This structure is used to transfer buffer information between users- and kernel space.
+ */
+struct mve_base_buffer_userspace
+{
+ uint64_t timestamp; /**< Buffer timestamp. */
+ mve_base_buffer_handle_t buffer_id; /**< Buffer unique ID. */
+ mve_base_buffer_handle_t handle; /**< Handle to the external buffer. */
+ mve_base_buffer_handle_t crc_handle; /**< Handle to the external CRC buffer. */
+
+ enum mve_base_buffer_allocator allocator; /**< Specifies which allocator was used to allocate
+ * the buffer. */
+ uint32_t size; /**< Size of the external buffer in bytes. */
+ uint32_t width; /**< Width of the buffer (only for pixel formats). */
+ uint32_t height; /**< Height of the buffer (only for pixel formats). */
+ uint32_t stride; /**< Stride of the buffer (only for pixel formats). */
+ uint32_t stride_alignment; /**< Alignment of the stride in bytes (only for pixel formats). */
+ enum mve_base_omx_buffer_format format; /**< Format of the buffer. */
+
+ uint32_t decoded_width; /**< Width of the decoded frame. Only valid for a returned frame buffer */
+ uint32_t decoded_height; /**< Height of the decoded frame. Only valid for a returned frame buffer */
+
+ uint32_t afbc_width_in_superblocks; /**< Width of the AFBC buffer in superblocks (only for AFBC formats) */
+ uint32_t afbc_alloc_bytes; /**< Size of the AFBC frame */
+
+ uint32_t filled_len; /**< Number of bytes worth of data in the buffer. */
+ uint32_t offset; /**< Offset from start of buffer to first byte. */
+ uint32_t flags; /**< Flags for OMX use. */
+ uint32_t mve_flags; /**< MVE sideband information. */
+ uint32_t pic_index; /**< Picture index in decode order. Output from FW. */
+
+ uint16_t cropx; /**< Luma x crop. */
+ uint16_t cropy; /**< Luma y crop. */
+ uint8_t y_offset; /**< Deblocking y offset of picture. */
+
+ enum mve_base_buffer_allocator crc_allocator; /**< CRC buffer allocator. */
+ uint32_t crc_size; /**< Size of the CRC buffer. */
+ uint32_t crc_offset; /**< Offset of the CRC data in the buffer. */
+};
+
+enum mve_base_hw_state
+{
+ MVE_BASE_HW_STATE_STOPPED = 0, /**< HW in STOPPED state. */
+ MVE_BASE_HW_STATE_RUNNING = 2, /**< HW in RUNNING state. */
+ MVE_BASE_HW_STATE_PENDING = 4 /**< Requested for HW State change and waiting for the response. */
+};
+
+/**
+ * Defines what port(s) to flush.
+ */
+enum mve_base_flush
+{
+ MVE_BASE_FLUSH_INPUT_PORT = 1, /**< Flush the input port */
+ MVE_BASE_FLUSH_OUTPUT_PORT = 2, /**< Flush the output port */
+ MVE_BASE_FLUSH_ALL_PORTS = MVE_BASE_FLUSH_INPUT_PORT | MVE_BASE_FLUSH_OUTPUT_PORT,
+ /**< Flush input and output ports */
+ MVE_BASE_FLUSH_QUICK = 1 << 2, /**< Perform a quick flush. Quick flush means that
+ * all flushed buffers will automatically be
+ * re-enqueued once all buffers have been flushed.
+ * Userspace will not be notified of the flushed
+ * buffers or that the flush is complete. */
+ MVE_BASE_FLUSH_QUICK_SET_INTERLACE = 1 << 3,
+ /**< Makes all output buffers added as interlaced
+ * buffers once the quick flush is completed */
+};
+
+struct mve_base_hw_info
+{
+ uint32_t fuse; /**< Hardware fuse register. */
+ uint32_t version; /**< Hardware version. */
+ uint32_t ncores; /**< Number of MVE cores. */
+};
+
+struct mve_base_fw_version
+{
+ uint8_t major; /**< Firmware major version. */
+ uint8_t minor; /**< Firmware minor version. */
+};
+
+struct mve_base_fw_secure_descriptor
+{
+ struct mve_base_fw_version fw_version; /**< FW protocol version */
+ uint32_t l2pages; /**< Physical address of l2pages created by secure OS */
+};
+
+struct mve_base_fw_frame_alloc_parameters
+{
+ uint16_t planar_alloc_frame_width; /**< Width of planar YUV buffer */
+ uint16_t planar_alloc_frame_height; /**< Height of planar YUV buffer */
+
+ uint32_t afbc_alloc_bytes; /**< Number of bytes needed for an AFBC buffer */
+
+ uint32_t afbc_alloc_bytes_downscaled; /**< Number of bytes needed for downscaled AFBC buffer */
+
+ uint16_t afbc_width_in_superblocks; /**< Width of the AFBC buffer needed by the FW */
+ uint16_t afbc_width_in_superblocks_downscaled; /**< Width of the downscaled AFBC buffer needed by the FW */
+
+ uint16_t cropx; /**< Hints on how much to adjust the plane addresses to get optimal AXI bursts */
+ uint16_t cropy; /**< Hints on how much to adjust the plane addresses to get optimal AXI bursts */
+
+ uint32_t mbinfo_alloc_bytes; /* Only for debugging */
+};
+
+/*
+ * Defines the type of memory region to allocate in RPC memory calls.
+ * Refer to mve_rpc_params.region from mve_protocol_def.h.
+ */
+enum mve_base_memory_region
+{
+ MVE_BASE_MEMORY_REGION_PROTECTED = 0,
+ MVE_BASE_MEMORY_REGION_OUTBUF = 1,
+ MVE_BASE_MEMORY_REGION_FRAMEBUF = MVE_BASE_MEMORY_REGION_OUTBUF
+};
+
+struct mve_base_rpc_memory
+{
+ enum mve_base_memory_region region;
+ uint32_t size;
+};
+
+/*
+ * Copied from mve_protocol_def.h.
+ */
+struct mve_base_response_sequence_parameters
+{
+ uint8_t interlace;
+ uint8_t chroma_format;
+ #define MVE_BASE_CHROMA_FORMAT_MONO 0x0
+ #define MVE_BASE_CHROMA_FORMAT_420 0x1
+ #define MVE_BASE_CHROMA_FORMAT_422 0x2
+ uint8_t bitdepth_luma;
+ uint8_t bitdepth_chroma;
+ uint8_t num_buffers_planar;
+ uint8_t num_buffers_afbc;
+ uint8_t range_mapping_enabled;
+ uint8_t reserved0;
+};
+
+/*
+ * Copied from mve_protocol_def.h.
+ */
+
+/* output from decoder */
+struct mve_base_buffer_param_display_size
+{
+ uint16_t display_width;
+ uint16_t display_height;
+};
+
+/* output from decoder, colour information needed for hdr */
+struct mve_base_buffer_param_colour_description
+{
+ uint32_t flags;
+ #define MVE_BASE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1)
+ #define MVE_BASE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID (2)
+
+ uint8_t range; /* Unspecified=0, Limited=1, Full=2 */
+ #define MVE_BASE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED (0)
+ #define MVE_BASE_BUFFER_PARAM_COLOUR_RANGE_LIMITED (1)
+ #define MVE_BASE_BUFFER_PARAM_COLOUR_RANGE_FULL (2)
+
+ uint8_t colour_primaries; /* see hevc spec. E.3.1 */
+ uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */
+ uint8_t matrix_coeff; /* see hevc spec. E.3.1 */
+
+ uint16_t mastering_display_primaries_x[3]; /* see hevc spec. D.3.27 */
+ uint16_t mastering_display_primaries_y[3]; /* see hevc spec. D.3.27 */
+ uint16_t mastering_white_point_x; /* see hevc spec. D.3.27 */
+ uint16_t mastering_white_point_y; /* see hevc spec. D.3.27 */
+ uint32_t max_display_mastering_luminance; /* see hevc spec. D.3.27 */
+ uint32_t min_display_mastering_luminance; /* see hevc spec. D.3.27 */
+
+ uint32_t max_content_light_level; /* unused */
+ uint32_t avg_content_light_level; /* unused */
+};
+
+/* Parameters that are sent in the same communication channels
+ * as the buffers. A parameter applies to all subsequent buffers.
+ * Some types are only valid for decode, and some only for encode.
+ */
+struct mve_base_buffer_param
+{
+ uint32_t type;
+ #define MVE_BASE_BUFFER_PARAM_TYPE_DISPLAY_SIZE (5)
+ #define MVE_BASE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION (15)
+ #define MVE_BASE_BUFFER_PARAM_TYPE_FRAME_FIELD_INFO (17)
+ #define MVE_BASE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES (19)
+
+ union
+ {
+ struct mve_base_buffer_param_display_size display_size;
+ struct mve_base_buffer_param_colour_description colour_description;
+ }
+ data;
+};
+
+#endif /* MVE_BASE_H */
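
Everything in this header funnels through the single MVE_BASE_COMMAND ioctl: userspace submits a mve_base_command_header (fixed header plus trailing payload in data[0]) and, since the ioctl is declared _IOWR, presumably reads a mve_base_response_header back through the same pointer. A hypothetical userspace sketch under those assumptions; the device node path and the in-place response handling are assumptions, not taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "mve_base.h"

int main(void)
{
    struct mve_base_command_header *cmd;
    int fd;

    fd = open("/dev/mve", O_RDWR); /* node name is an assumption */
    if (fd < 0)
    {
        return 1;
    }

    /* Allocate room for the command and for an assumed in-place response */
    cmd = calloc(1, sizeof(struct mve_base_response_header) + 64);
    if (NULL == cmd)
    {
        close(fd);
        return 1;
    }

    cmd->cmd = MVE_BASE_CREATE_SESSION; /* carries no payload */
    cmd->size = 0;

    if (0 == ioctl(fd, MVE_BASE_COMMAND, cmd))
    {
        struct mve_base_response_header *rsp = (void *)cmd;
        printf("error=%u firmware_error=%u\n",
               (unsigned)rsp->error, (unsigned)rsp->firmware_error);
    }

    free(cmd);
    close(fd);
    return 0;
}
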
diff --git a/drivers/video/arm/v5xx/base/mve_buffer.h b/drivers/video/arm/v5xx/base/mve_buffer.h
new file mode 100644
index 000000000000..3ae2b944327a
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer.h
@@ -0,0 +1,192 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_BUFFER_H
+#define MVE_BUFFER_H
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+#include "mve_base.h"
+#include "mve_mem_region.h"
+
+/* Invalid MVE handle */
+#define MVE_HANDLE_INVALID -1
+
+/**
+ * This module defines the interface for registering user allocated memory
+ * buffers with the MVE.
+ */
+
+/**
+ * This enum lists the port indices
+ */
+enum mve_port_index
+{
+ MVE_PORT_INDEX_INPUT = 0,
+ MVE_PORT_INDEX_OUTPUT = 1
+};
+
+enum mve_buffer_owner
+{
+ MVE_BUFFER_OWNER_CPU,
+ MVE_BUFFER_OWNER_DEVICE
+};
+
+#define ROUND_UP(v, a) (((v) + (a) - 1) & ~((a) - 1))
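
ROUND_UP uses the usual power-of-two trick, adding a - 1 and masking with ~(a - 1), so it is only valid when the alignment a is a power of two. For example:

/* ROUND_UP(1920, 64) == 1920 (already a multiple of 64)
 * ROUND_UP(1921, 64) == 1984 (rounded up to the next multiple) */
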
+
+/**
+ * This structure contains all information that is needed to describe a userspace
+ * allocated buffer.
+ */
+struct mve_buffer_info
+{
+ mve_base_buffer_handle_t buffer_id; /**< Unique buffer ID */
+ mve_base_buffer_handle_t handle; /**< Handle to the external buffer */
+ enum mve_base_buffer_allocator allocator; /**< Specifies which allocator was used to allocate
+ * the buffer */
+ uint32_t size; /**< Size of the external buffer in bytes */
+ uint32_t width; /**< Width of the buffer (only for pixel formats). */
+ uint32_t height; /**< Height of the buffer (only for pixel formats). */
+ uint32_t stride; /**< Stride of the buffer (only for pixel formats). */
+ uint32_t stride_alignment; /**< Stride alignment in bytes (only for pixel formats). */
+ enum mve_base_omx_buffer_format format; /**< Format of the buffer. */
+ uint32_t offset; /**< If allocator is attachment, this member marks
+ * the offset of the first byte in the buffer */
+ bool do_cache_maintenance; /**< True if cache maintenance is needed for this buffer */
+
+ uint32_t afbc_width_in_superblocks; /**< Width of the AFBC buffer in superblocks (only for AFBC formats) */
+ uint32_t afbc_alloc_bytes; /**< AFBC frame size */
+};
+
+/**
+ * This structure contains data necessary to describe a buffer mapping into MVE address space.
+ */
+struct mve_buffer_mapping
+{
+ struct mve_mem_virt_region region; /**< Defines the mapping in MVE virtual address space */
+
+ phys_addr_t *pages; /**< Array of physical pages */
+ uint32_t num_pages; /**< Number of pages in the array above */
+ uint32_t write; /**< Pages to be mapped writable or read only */
+
+ uint32_t offset_in_page; /**< Offset in bytes to the start of the buffer. 0 if
+ * the allocation is page aligned. */
+};
+
+/**
+ * This struct encapsulates information about a userspace allocated buffer
+ */
+struct mve_buffer_external
+{
+ struct mve_buffer_info info; /**< External buffer info */
+ struct mve_buffer_mapping mapping; /**< Mapping data */
+};
+
+/**
+ * This structure stores information regarding external buffer mappings for one
+ * input/output buffer. An OMX client may allocate a buffer in userspace and
+ * instruct the OMX component to use the memory backing that buffer. To do that,
+ * the driver must map the physical pages backing the memory buffer into the MVE
+ * virtual address space. The client may at a later point choose to unregister the
+ * buffer to prevent further MVE write operations to the buffer. This structure
+ * stores all data to support these operations.
+ */
+struct mve_buffer_client
+{
+ struct mve_buffer_external *buffer; /**< Buffer descriptor */
+ struct mve_buffer_external *crc; /**< CRC buffer descriptor */
+
+ uint32_t filled_len; /**< Number of bytes worth of data in the buffer */
+ uint32_t offset; /**< Offset from start of buffer to first byte */
+ uint32_t flags; /**< Flags for OMX use */
+ uint32_t mve_flags; /**< MVE sideband information */
+ uint32_t pic_index; /**< Picture index in decode order. Output from FW. */
+ uint64_t timestamp; /**< Buffer timestamp. */
+
+ uint32_t decoded_width; /**< Width of the decoded frame */
+ uint32_t decoded_height; /**< Height of the decoded frame */
+
+ uint16_t cropx; /**< Luma x crop */
+ uint16_t cropy; /**< Luma y crop */
+ uint8_t y_offset; /**< Deblocking y offset of picture */
+
+ int mve_handle; /**< Firmware buffer identifier */
+ int in_use; /**< Tells whether this buffer is in
+ * use by the MVE or not */
+ enum mve_port_index port_index; /**< Port index of this buffer */
+
+ struct list_head register_list; /**< Linked list register entry*/
+ struct list_head quick_flush_list; /**< Linked list quick flush entry */
+};
+
+struct mve_mmu_ctx;
+
+/**
+ * Validate the supplied buffer information.
+ * @param info Buffer to validate.
+ * @return True if the supplied buffer information is valid, false otherwise.
+ */
+bool mve_buffer_is_valid(const struct mve_buffer_info *info);
+
+/**
+ * Allocate memory for private data and a mve_buffer_external instance. This is
+ * also the place to e.g. import the external buffer etc.
+ * @param info Buffer information.
+ * @param port OMX port on which buffer is registered
+ * @return Pointer to the mve_buffer_external part of the allocated structure. NULL
+ * if no such structure could be allocated.
+ */
+struct mve_buffer_external *mve_buffer_create_buffer_external(struct mve_buffer_info *info, uint32_t port);
+
+/**
+ * Free the memory allocated in mve_buffer_create_buffer_external.
+ * @param buffer Pointer to the mve_buffer_external instance to free.
+ */
+void mve_buffer_destroy_buffer_external(struct mve_buffer_external *buffer);
+
+/**
+ * Constructs an array of physical pages backing the user allocated buffer and
+ * stores the array in the supplied mve_buffer_external. Note that the pages
+ * must be mapped in the MVE MMU table before MVE can access the buffer. This
+ * is the responsibility of the client.
+ * @param buffer The buffer to map.
+ * @return True on success, false on failure.
+ */
+bool mve_buffer_map_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Hand back the pages to the user allocated buffer allocator. Note that the
+ * pages must also be unmapped from the MVE MMU table. This is the responsibility
+ * of the client.
+ * @param buffer The buffer to unmap.
+ * @return True on success, false on failure. This function fails if the
+ * supplied buffer is currently not registered.
+ */
+bool mve_buffer_unmap_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Set the owner of the buffer. This function takes care of cache flushing.
+ * @param buffer The buffer to change ownership of.
+ * @param owner The new owner of the buffer.
+ * @param port The port this buffer is connected to (input/output).
+ */
+void mve_buffer_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port);
+
+#endif /* MVE_BUFFER_H */
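
Taken together, the functions declared above define a fixed lifecycle: validate the description, create the descriptor, pin the pages, hand ownership to the device, then unmap and destroy on the way out. A sketch of the registration half, using only the calls declared in this header (the helper name is hypothetical, and the MVE MMU mapping step is left as a comment because the header assigns it to the client):

static struct mve_buffer_external *register_buffer(struct mve_buffer_info *info,
                                                   enum mve_port_index port)
{
    struct mve_buffer_external *buf;

    if (false == mve_buffer_is_valid(info))
    {
        return NULL;
    }

    buf = mve_buffer_create_buffer_external(info, port);
    if (NULL == buf)
    {
        return NULL;
    }

    if (false == mve_buffer_map_physical_pages(buf))
    {
        mve_buffer_destroy_buffer_external(buf);
        return NULL;
    }

    /* ... map buf->mapping.pages into the MVE MMU table here ... */

    /* Flush caches and hand the pages over before the firmware runs */
    mve_buffer_set_owner(buf, MVE_BUFFER_OWNER_DEVICE, port);

    return buf;
}
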
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_ashmem.c b/drivers/video/arm/v5xx/base/mve_buffer_ashmem.c
new file mode 100644
index 000000000000..0c5beb66613b
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_ashmem.c
@@ -0,0 +1,176 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+
+#include "mve_mmu.h"
+#include "mve_buffer_ashmem.h"
+#include "mve_driver.h"
+
+#include "mve_rsrc_driver.h"
+
+/* This is an implementation of the mve_buffer_client interface where the client
+ * buffers have been allocated using ashmem.
+ */
+
+struct mve_buffer_external *mve_buffer_ashmem_create_buffer_external(struct mve_buffer_info *info)
+{
+ /* The ashmem implementation doesn't need to store any private data in
+ * the mve_buffer_external structure. Just create an instance and return it. */
+ struct mve_buffer_external *ret;
+
+ ret = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_buffer_external), GFP_KERNEL);
+ if (NULL != ret)
+ {
+ ret->info = *info;
+ }
+
+ return ret;
+}
+
+void mve_buffer_ashmem_destroy_buffer_external(struct mve_buffer_external *buffer)
+{
+ MVE_RSRC_MEM_CACHE_FREE(buffer, sizeof(struct mve_buffer_external));
+}
+
+bool mve_buffer_ashmem_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ uint32_t size;
+ void *ptr;
+ uint32_t num_pages;
+ uint32_t offset;
+ phys_addr_t *pages;
+
+ buffer->mapping.pages = NULL;
+ buffer->mapping.num_pages = 0;
+
+ size = buffer->info.size;
+ ptr = (void *)(ptrdiff_t)buffer->info.handle;
+ /* Calculate the number of needed pages */
+ offset = ((size_t)ptr) & (MVE_MMU_PAGE_SIZE - 1);
+ size += offset;
+ num_pages = (size + MVE_MMU_PAGE_SIZE - 1) / MVE_MMU_PAGE_SIZE;
+ /* Get the physical pages */
+ pages = mve_rsrc_mem_map_virt_to_phys(ptr - offset, size, buffer->mapping.write);
+ if (NULL == pages)
+ {
+ return false;
+ }
+
+ /* Register buffer as an external mapping */
+ buffer->mapping.pages = pages;
+ buffer->mapping.num_pages = num_pages;
+
+ return true;
+}
+
+bool mve_buffer_ashmem_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ if (NULL == buffer)
+ {
+ return false;
+ }
+
+ if (NULL == buffer->mapping.pages || 0 == buffer->mapping.num_pages)
+ {
+ return false;
+ }
+
+ /* mve_rsrc_mem_unmap_virt_to_phys frees the memory allocated for the
+ * buffer->mapping.pages array. */
+ mve_rsrc_mem_unmap_virt_to_phys(buffer->mapping.pages, buffer->mapping.num_pages);
+
+ buffer->mapping.pages = NULL;
+ buffer->mapping.num_pages = 0;
+
+ return true;
+}
+
+void mve_buffer_ashmem_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{
+ int i, len = 0;
+ void (*sync_func)(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction);
+ enum dma_data_direction direction;
+
+ if (NULL == buffer)
+ {
+ return;
+ }
+
+ if (MVE_BUFFER_OWNER_CPU == owner && MVE_PORT_INDEX_INPUT == port)
+ {
+ return;
+ }
+
+ /* Make sure the buffer handle is valid */
+ if (0 == buffer->info.handle)
+ {
+ return;
+ }
+
+ if (MVE_BUFFER_OWNER_CPU == owner)
+ {
+ sync_func = dma_sync_single_for_cpu;
+ }
+ else
+ {
+ sync_func = dma_sync_single_for_device;
+ }
+
+ if (MVE_PORT_INDEX_OUTPUT == port)
+ {
+ /* Data going from device to CPU */
+ direction = DMA_FROM_DEVICE;
+ }
+ else
+ {
+ /* Data going from CPU to device */
+ direction = DMA_TO_DEVICE;
+ }
+
+ for (i = 0; i < buffer->mapping.num_pages; ++i)
+ {
+ uint32_t offset;
+ uint32_t size;
+
+ if (i == 0)
+ {
+ offset = buffer->mapping.offset_in_page;
+ size = PAGE_SIZE - buffer->mapping.offset_in_page;
+ }
+ else if (i == buffer->mapping.num_pages - 1)
+ {
+ offset = 0;
+ size = buffer->info.size - len;
+ }
+ else
+ {
+ offset = 0;
+ size = PAGE_SIZE;
+ }
+
+ len += size;
+ /* Use the device struct from the resource module since this was used when
+ * mapping the physical pages */
+ sync_func(&mve_rsrc_data.pdev->dev, buffer->mapping.pages[i] + offset, size, direction);
+ }
+}
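
The mapping code above rounds the buffer out to whole pages: the sub-page offset of the start address is folded into the size before dividing. A worked example of that arithmetic, assuming MVE_MMU_PAGE_SIZE is 4096 (the actual value is defined in mve_mmu.h, which is not part of this excerpt):

/* A 10000-byte buffer starting 0x123 bytes into a page:
 *   offset    = addr & (4096 - 1)      = 291
 *   size      = 10000 + 291            = 10291
 *   num_pages = (10291 + 4095) / 4096  = 3
 * i.e. the buffer straddles three physical pages even though
 * 10000 bytes alone would fit in two and a half. */
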
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_ashmem.h b/drivers/video/arm/v5xx/base/mve_buffer_ashmem.h
new file mode 100644
index 000000000000..f6003070eb76
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_ashmem.h
@@ -0,0 +1,65 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_BUFFER_ASHMEM_H
+#define MVE_BUFFER_ASHMEM_H
+
+#include "mve_buffer.h"
+
+/**
+ * Allocate memory for private data and a mve_buffer_external instance. This is
+ * also the place to e.g. import the external buffer etc.
+ * @param info Buffer information.
+ * @return Pointer to the mve_buffer_external part of the allocated structure. NULL
+ * if no such structure could be allocated.
+ */
+struct mve_buffer_external *mve_buffer_ashmem_create_buffer_external(struct mve_buffer_info *info);
+
+/**
+ * Free the memory allocated in mve_buffer_create_buffer_external.
+ * @param buffer Pointer to the mve_buffer_external instance to free.
+ */
+void mve_buffer_ashmem_destroy_buffer_external(struct mve_buffer_external *buffer);
+
+/**
+ * Constructs an array of physical pages backing the user allocated buffer and
+ * stores the array in the supplied mve_buffer_external. Note that the pages
+ * must be mapped in the MVE MMU table before MVE can access the buffer. This
+ * is the responsibility of the client.
+ * @param buffer The buffer to map.
+ * @return True on success, false on failure.
+ */
+bool mve_buffer_ashmem_map_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Hand back the pages to the user allocated buffer allocator. Note that the
+ * pages must also be unmapped from the MVE MMU table. This is the responsibility
+ * of the client.
+ * @param buffer The buffer to unmap.
+ * @return True on success, false on failure. This function fails if the
+ * supplied buffer is currently not registered.
+ */
+bool mve_buffer_ashmem_unmap_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Set the owner of the buffer. This function takes care of cache flushing.
+ * @param buffer The buffer to change ownership of.
+ * @param owner The new owner of the buffer.
+ * @param port The port this buffer is connected to (input/output).
+ */
+void mve_buffer_ashmem_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port);
+
+#endif /* MVE_BUFFER_ASHMEM_H */
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_ashmem_stub.c b/drivers/video/arm/v5xx/base/mve_buffer_ashmem_stub.c
new file mode 100644
index 000000000000..eb6580129eda
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_ashmem_stub.c
@@ -0,0 +1,38 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_buffer_ashmem.h"
+
+struct mve_buffer_external *mve_buffer_ashmem_create_buffer_external(struct mve_buffer_info *info)
+{
+ return NULL;
+}
+
+void mve_buffer_ashmem_destroy_buffer_external(struct mve_buffer_external *buffer)
+{}
+
+bool mve_buffer_ashmem_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ return false;
+}
+
+bool mve_buffer_ashmem_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ return false;
+}
+
+void mve_buffer_ashmem_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{}
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_attachment.c b/drivers/video/arm/v5xx/base/mve_buffer_attachment.c
new file mode 100644
index 000000000000..548fe7a562f1
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_attachment.c
@@ -0,0 +1,67 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_mmu.h"
+#include "mve_buffer_attachment.h"
+
+/* This is an implementation of the mve_buffer_client interface where the client
+ * buffers are attachments to other buffers.
+ */
+
+#ifndef CSTD_UNUSED
+#define CSTD_UNUSED(x) ((void)(x))
+#endif
+
+struct mve_buffer_external *mve_buffer_attachment_create_buffer_external(struct mve_buffer_info *info)
+{
+ /* The attachment implementation doesn't need to store any private data in
+ * the mve_buffer_external structure. Just create an instance and return it. */
+ struct mve_buffer_external *ret;
+
+ ret = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_buffer_external), GFP_KERNEL);
+ if (NULL != ret)
+ {
+ ret->info = *info;
+ }
+
+ return ret;
+}
+
+void mve_buffer_attachment_destroy_buffer_external(struct mve_buffer_external *buffer)
+{
+ MVE_RSRC_MEM_CACHE_FREE(buffer, sizeof(struct mve_buffer_external));
+}
+
+bool mve_buffer_attachment_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ CSTD_UNUSED(buffer);
+
+ return true;
+}
+
+bool mve_buffer_attachment_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ CSTD_UNUSED(buffer);
+
+ return true;
+}
+
+void mve_buffer_attachment_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{
+ CSTD_UNUSED(buffer);
+ CSTD_UNUSED(owner);
+ CSTD_UNUSED(port);
+}
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_attachment.h b/drivers/video/arm/v5xx/base/mve_buffer_attachment.h
new file mode 100644
index 000000000000..78c161104314
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_attachment.h
@@ -0,0 +1,55 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_BUFFER_ATTACHMENT_H
+#define MVE_BUFFER_ATTACHMENT_H
+
+#include "mve_buffer.h"
+
+/**
+ * Allocate memory for private data and a mve_buffer_external instance.
+ * @param info Buffer information.
+ * @return Pointer to the mve_buffer_external part of the allocated structure. NULL
+ * if no such structure could be allocated.
+ */
+struct mve_buffer_external *mve_buffer_attachment_create_buffer_external(struct mve_buffer_info *info);
+
+/**
+ * Free the memory allocated in mve_buffer_create_buffer_external.
+ * @param buffer Pointer to the mve_buffer_external instance to free.
+ */
+void mve_buffer_attachment_destroy_buffer_external(struct mve_buffer_external *buffer);
+
+/**
+ * This function doesn't do anything for attachment buffers.
+ * @param buffer The buffer.
+ * @return Always returns true
+ */
+bool mve_buffer_attachment_map_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * This function doesn't do anything for attachment buffers.
+ * @param buffer The buffer.
+ * @return Always returns true
+ */
+bool mve_buffer_attachment_unmap_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * This function doesn't do anything for attachment buffers.
+ */
+void mve_buffer_attachment_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port);
+
+#endif /* MVE_BUFFER_ATTACHMENT_H */
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_common.c b/drivers/video/arm/v5xx/base/mve_buffer_common.c
new file mode 100644
index 000000000000..d137c69c2340
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_common.c
@@ -0,0 +1,263 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_buffer.h"
+
+#include "mve_buffer_valloc.h"
+#include "mve_buffer_attachment.h"
+#include "mve_buffer_dmabuf.h"
+#include "mve_buffer_ashmem.h"
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <asm/bug.h>
+#endif
+
+/** @brief Allocator interface
+ *
+ * Collection of function-pointers that together create an interface for an
+ * allocator.
+ */
+struct allocator_interface
+{
+ /** Create the buffer client */
+ struct mve_buffer_external *(*create_buffer_external)(struct mve_buffer_info *info);
+ /** Destroy the buffer client */
+ void (*destroy_buffer_external)(struct mve_buffer_external *buffer);
+ /** Map the memory */
+ bool (*map_physical_pages)(struct mve_buffer_external *buffer);
+ /** Unmap the memory */
+ bool (*unmap_physical_pages)(struct mve_buffer_external *buffer);
+ /** Set the owner of the buffer (CPU or device) */
+ void (*set_owner)(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port);
+};
+
+static struct allocator_interface allocators[] =
+{
+ { /* Valloc */
+ .create_buffer_external = mve_buffer_valloc_create_buffer_external,
+ .destroy_buffer_external = mve_buffer_valloc_destroy_buffer_external,
+ .map_physical_pages = mve_buffer_valloc_map_physical_pages,
+ .unmap_physical_pages = mve_buffer_valloc_unmap_physical_pages,
+ .set_owner = mve_buffer_valloc_set_owner
+ },
+ { /* Attachment */
+ .create_buffer_external = mve_buffer_attachment_create_buffer_external,
+ .destroy_buffer_external = mve_buffer_attachment_destroy_buffer_external,
+ .map_physical_pages = mve_buffer_attachment_map_physical_pages,
+ .unmap_physical_pages = mve_buffer_attachment_unmap_physical_pages,
+ .set_owner = mve_buffer_attachment_set_owner
+ },
+#ifndef EMULATOR
+ { /* dma buf */
+ .create_buffer_external = mve_buffer_dmabuf_create_buffer_external,
+ .destroy_buffer_external = mve_buffer_dmabuf_destroy_buffer_external,
+ .map_physical_pages = mve_buffer_dmabuf_map_physical_pages,
+ .unmap_physical_pages = mve_buffer_dmabuf_unmap_physical_pages,
+ .set_owner = mve_buffer_dmabuf_set_owner
+ },
+ { /* ashmem buf */
+ .create_buffer_external = mve_buffer_ashmem_create_buffer_external,
+ .destroy_buffer_external = mve_buffer_ashmem_destroy_buffer_external,
+ .map_physical_pages = mve_buffer_ashmem_map_physical_pages,
+ .unmap_physical_pages = mve_buffer_ashmem_unmap_physical_pages,
+ .set_owner = mve_buffer_ashmem_set_owner
+ }
+#endif
+};
+
+bool mve_buffer_is_valid(const struct mve_buffer_info *info)
+{
+ int bpp, size;
+
+ if (0 == info->handle)
+ {
+ return false;
+ }
+
+ if (0 == info->size)
+ {
+ return false;
+ }
+
+ if (MVE_BASE_OMX_BUFFER_FORMAT_BITSTREAM != info->format &&
+ MVE_BASE_OMX_BUFFER_FORMAT_CRC != info->format)
+ {
+ if (0 == info->width ||
+ 0 == info->height ||
+ 0 == info->stride)
+ {
+ return false;
+ }
+ }
+
+ if (MVE_BASE_BUFFER_ALLOCATOR_VMALLOC != info->allocator &&
+ MVE_BASE_BUFFER_ALLOCATOR_DMABUF != info->allocator &&
+ MVE_BASE_BUFFER_ALLOCATOR_ASHMEM != info->allocator &&
+ MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT != info->allocator)
+ {
+ return false;
+ }
+
+ if (MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT == info->allocator &&
+ 0 == info->offset)
+ {
+ return false;
+ }
+
+ switch (info->format)
+ {
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YV12:
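+        /* 4:2:0 formats average 12 bits per pixel */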
+ bpp = 12;
+ if (((info->width * info->height * bpp) / 8) > info->size)
+ {
+ return false;
+ }
+ if (info->stride * info->height > info->size)
+ {
+ return false;
+ }
+ if (info->stride < info->width)
+ {
+ return false;
+ }
+
+ if (MVE_BASE_BUFFER_ALLOCATOR_DMABUF == info->allocator ||
+ MVE_BASE_BUFFER_ALLOCATOR_ASHMEM == info->allocator)
+ {
+ size = ROUND_UP(info->stride, info->stride_alignment) * info->height +
+ ROUND_UP((info->stride + 1) >> 1, info->stride_alignment) * info->height;
+            /* This will help detect stride alignment changes that cause
+             * the buffer to become smaller than expected */
+ WARN_ON(size > info->size);
+ }
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUYYVY_10B:
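+        /* Packed 10-bit format, treated as 16 bits per pixel */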
+ bpp = 16;
+ if (((info->width * info->height * bpp) / 8) > info->size)
+ {
+ return false;
+ }
+ if (info->stride < info->width)
+ {
+ return false;
+ }
+ if (info->stride * 4 * info->height / 2 > info->size)
+ {
+ return false;
+ }
+ if (MVE_BASE_BUFFER_ALLOCATOR_DMABUF == info->allocator ||
+ MVE_BASE_BUFFER_ALLOCATOR_ASHMEM == info->allocator)
+ {
+ size = ROUND_UP(info->stride * 4, info->stride_alignment) * info->height / 2;
+            /* This will help detect stride alignment changes that cause
+             * the buffer to become smaller than expected */
+ WARN_ON(size > info->size);
+ }
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_RGBA_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_BGRA_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_ARGB_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_ABGR_8888:
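+        /* 32-bit RGB formats: 4 bytes per pixel */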
+ bpp = 32;
+ if (((info->width * info->height * bpp) / 8) > info->size)
+ {
+ return false;
+ }
+ if (info->stride < info->width * 4)
+ {
+ return false;
+ }
+ if (info->stride * info->height > info->size)
+ {
+ return false;
+ }
+
+ size = ROUND_UP(info->stride, info->stride_alignment) * info->height;
+        /* This will help detect stride alignment changes that cause
+         * the buffer to become smaller than expected */
+ WARN_ON(size > info->size);
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC:
+ {
+ int w = (info->width + 15) / 16;
+ int h = (info->height + 15) / 16;
+        /* Magic formula for worst-case content */
+ if (w * 16 * h * 16 / 16 + w * 16 * h * 16 * 3 / 2 > info->size)
+ {
+ return false;
+ }
+ break;
+ }
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B:
+ {
+ int w = (info->width + 15) / 16;
+ int h = (info->height + 15) / 16;
+        /* Magic formula for worst-case content */
+ if (w * h * 16 + w * 16 * h * 16 * 15 / 8 > info->size)
+ {
+ return false;
+ }
+ break;
+ }
+ case MVE_BASE_OMX_BUFFER_FORMAT_BITSTREAM:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV422_1P:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU422_1P:
+ case MVE_BASE_OMX_BUFFER_FORMAT_CRC:
+ break;
+ default:
+ /* Unsupported format */
+ return false;
+ }
+
+ return true;
+}
+
+struct mve_buffer_external *mve_buffer_create_buffer_external(struct mve_buffer_info *info, uint32_t port)
+{
+ struct mve_buffer_external *buffer = allocators[info->allocator].create_buffer_external(info);
+ if (NULL != buffer)
+ {
+ buffer->mapping.write = 1; /*MVE_PORT_INDEX_INPUT == port ? 0 : 1*/
+ }
+ return buffer;
+}
+
+void mve_buffer_destroy_buffer_external(struct mve_buffer_external *buffer)
+{
+ allocators[buffer->info.allocator].destroy_buffer_external(buffer);
+}
+
+bool mve_buffer_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ return allocators[buffer->info.allocator].map_physical_pages(buffer);
+}
+
+bool mve_buffer_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ return allocators[buffer->info.allocator].unmap_physical_pages(buffer);
+}
+
+void mve_buffer_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{
+ allocators[buffer->info.allocator].set_owner(buffer, owner, port);
+}
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_dmabuf.c b/drivers/video/arm/v5xx/base/mve_buffer_dmabuf.c
new file mode 100644
index 000000000000..b275c13741bd
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_dmabuf.c
@@ -0,0 +1,230 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_buffer_dmabuf.h"
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <asm/cacheflush.h>
+
+#include "mve_driver.h"
+
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_mem_frontend.h"
+
+#ifndef CSTD_UNUSED
+#define CSTD_UNUSED(x) ((void)(x))
+#endif
+
+/** @brief Buffer-client for the dmabuf interface.
+ */
+struct dmabuf_buffer_external
+{
+ struct mve_buffer_external buffer; /**< Buffer client common data.
+ * This member must be the first entry
+ * of the struct */
+ struct dma_buf *handle; /**< The dma_buf handle */
+    struct dma_buf_attachment *attachment; /**< Pointer to the attachment */
+ struct sg_table *sg; /**< Pointer to the scatter-gather */
+};
+
+struct mve_buffer_external *mve_buffer_dmabuf_create_buffer_external(struct mve_buffer_info *info)
+{
+ struct dmabuf_buffer_external *buffer_external = NULL;
+ unsigned long fd;
+
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ buffer_external = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct dmabuf_buffer_external) + 100, GFP_KERNEL);
+ if (NULL == buffer_external)
+ {
+ return NULL;
+ }
+
+ fd = (unsigned long)info->handle;
+ buffer_external->handle = dma_buf_get((int)fd);
+ if (IS_ERR_OR_NULL(buffer_external->handle))
+ {
+ goto error;
+ }
+
+ buffer_external->attachment = dma_buf_attach(buffer_external->handle, &mve_rsrc_data.pdev->dev);
+ if (IS_ERR(buffer_external->attachment))
+ {
+ goto error;
+ }
+
+ buffer_external->buffer.info = *info;
+
+ return &buffer_external->buffer;
+
+error:
+ if (NULL != buffer_external)
+ {
+ if (!IS_ERR_OR_NULL(buffer_external->handle))
+ {
+ dma_buf_put(buffer_external->handle);
+ }
+
+ MVE_RSRC_MEM_CACHE_FREE(buffer_external, sizeof(struct dmabuf_buffer_external) + 100);
+ }
+
+ return NULL;
+}
+
+void mve_buffer_dmabuf_destroy_buffer_external(struct mve_buffer_external *buffer)
+{
+ struct dmabuf_buffer_external *buffer_external;
+
+ if (NULL == buffer)
+ {
+ return;
+ }
+
+ buffer_external = (struct dmabuf_buffer_external *)buffer;
+
+ dma_buf_detach(buffer_external->handle, buffer_external->attachment);
+ dma_buf_put(buffer_external->handle);
+ MVE_RSRC_MEM_CACHE_FREE(buffer_external, sizeof(struct dmabuf_buffer_external) + 100);
+}
+
+bool mve_buffer_dmabuf_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ struct dmabuf_buffer_external *buffer_external;
+ struct scatterlist *sgl;
+ uint32_t num_pages;
+ phys_addr_t *pages = NULL;
+ uint32_t i;
+ uint32_t count = 0;
+
+ buffer_external = (struct dmabuf_buffer_external *)buffer;
+
+ buffer_external->sg = dma_buf_map_attachment(buffer_external->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(buffer_external->sg))
+ {
+ return false;
+ }
+
+ num_pages = (buffer->info.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = MVE_RSRC_MEM_VALLOC(sizeof(phys_addr_t) * num_pages);
+ if (NULL == pages)
+ {
+ goto error;
+ }
+
+ /* Extract the address of each page and save the address in the pages array */
+ for_each_sg(buffer_external->sg->sgl, sgl, buffer_external->sg->nents, i)
+ {
+ uint32_t npages = PFN_UP(sg_dma_len(sgl));
+ uint32_t j;
+
+ for (j = 0; (j < npages) && (count < num_pages); ++j)
+ {
+ pages[count++] = sg_dma_address(sgl) + (j << PAGE_SHIFT);
+ }
+ WARN_ONCE(j < npages, "[MVE] scatterlist is bigger than the expected size (%d pages)\n", num_pages);
+ }
+
+ if (WARN_ONCE(count < num_pages, "[MVE] scatterlist doesn't contain enough pages (found: %d expected: %d pages)\n",
+ count, num_pages))
+ {
+ goto error;
+ }
+
+ /* Register buffer as an external mapping */
+ buffer->mapping.pages = pages;
+ buffer->mapping.num_pages = count;
+
+ return true;
+
+error:
+ dma_buf_unmap_attachment(buffer_external->attachment,
+ buffer_external->sg,
+ DMA_BIDIRECTIONAL);
+
+ buffer_external->sg = NULL;
+ MVE_RSRC_MEM_VFREE(pages);
+
+ return false;
+}
+
+bool mve_buffer_dmabuf_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ struct dmabuf_buffer_external *buffer_external;
+
+ if (NULL == buffer)
+ {
+ return false;
+ }
+
+ buffer_external = (struct dmabuf_buffer_external *)buffer;
+
+ dma_buf_unmap_attachment(buffer_external->attachment,
+ buffer_external->sg,
+ DMA_BIDIRECTIONAL);
+ MVE_RSRC_MEM_VFREE(buffer->mapping.pages);
+
+ buffer_external->sg = NULL;
+ buffer->mapping.pages = NULL;
+ buffer->mapping.num_pages = 0;
+
+ return true;
+}
+
+void mve_buffer_dmabuf_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{
+ void (*sync_func)(struct device *dev,
+ struct scatterlist *sg,
+ int nelems,
+ enum dma_data_direction direction);
+ enum dma_data_direction direction;
+
+ struct dmabuf_buffer_external *buffer_external = (struct dmabuf_buffer_external *)buffer;
+
+ if (NULL == buffer)
+ {
+ return;
+ }
+
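+    /* Input buffers are only read by the device, so no cache maintenance is
+     * needed when ownership returns to the CPU. */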
+ if (MVE_BUFFER_OWNER_CPU == owner && MVE_PORT_INDEX_INPUT == port)
+ {
+ return;
+ }
+
+ if (MVE_BUFFER_OWNER_CPU == owner)
+ {
+ sync_func = dma_sync_sg_for_cpu;
+ }
+ else
+ {
+ sync_func = dma_sync_sg_for_device;
+ }
+
+ if (MVE_PORT_INDEX_OUTPUT == port)
+ {
+ /* Data going from device to CPU */
+ direction = DMA_FROM_DEVICE;
+ }
+ else
+ {
+ /* Data going from CPU to device */
+ direction = DMA_TO_DEVICE;
+ }
+
+ sync_func(&mve_rsrc_data.pdev->dev, buffer_external->sg->sgl, buffer_external->sg->nents, direction);
+}
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_dmabuf.h b/drivers/video/arm/v5xx/base/mve_buffer_dmabuf.h
new file mode 100644
index 000000000000..be34c9eab024
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_dmabuf.h
@@ -0,0 +1,65 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_BUFFER_DMA_BUF_H
+#define MVE_BUFFER_DMA_BUF_H
+
+#include "mve_buffer.h"
+
+/**
+ * Allocate memory for private data and a mve_buffer_external instance. This is
+ * also the place to e.g. import the external buffer etc.
+ * @param info Buffer information.
+ * @return Pointer to the mve_buffer_external part of the allocated structure. NULL
+ * if no such structure could be allocated.
+ */
+struct mve_buffer_external *mve_buffer_dmabuf_create_buffer_external(struct mve_buffer_info *info);
+
+/**
+ * Free the memory allocated in mve_buffer_dmabuf_create_buffer_external.
+ * @param buffer Pointer to the mve_buffer_external instance to free.
+ */
+void mve_buffer_dmabuf_destroy_buffer_external(struct mve_buffer_external *buffer);
+
+/**
+ * Constructs an array of physical pages backing the user allocated buffer and
+ * stores the array in the supplied mve_buffer_external. Note that the pages
+ * must be mapped in the MVE MMU table before MVE can access the buffer. This
+ * is the responsibility of the client.
+ * @param buffer The buffer to map.
+ * @return True on success, false on failure.
+ */
+bool mve_buffer_dmabuf_map_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Hand back the pages to the user allocated buffer allocator. Note that the
+ * pages must also be unmapped from the MVE MMU table. This is the responsibility
+ * of the client.
+ * @param buffer The buffer to unmap.
+ * @return True on success, false on failure. This function fails if the
+ * supplied buffer is currently not registered.
+ */
+bool mve_buffer_dmabuf_unmap_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Set the owner of the buffer. This function takes care of cache flushing.
+ * @param buffer The buffer to change ownership of.
+ * @param owner The new owner of the buffer.
+ * @param port The port this buffer is connected to (input/output).
+ */
+void mve_buffer_dmabuf_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port);
+
+#endif /* MVE_BUFFER_DMA_BUF_H */
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_dmabuf_stub.c b/drivers/video/arm/v5xx/base/mve_buffer_dmabuf_stub.c
new file mode 100644
index 000000000000..4001d22b8ef0
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_dmabuf_stub.c
@@ -0,0 +1,38 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_buffer_dmabuf.h"
+
+struct mve_buffer_external *mve_buffer_dmabuf_create_buffer_external(struct mve_buffer_info *info)
+{
+ return NULL;
+}
+
+void mve_buffer_dmabuf_destroy_buffer_external(struct mve_buffer_external *buffer)
+{}
+
+bool mve_buffer_dmabuf_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ return false;
+}
+
+bool mve_buffer_dmabuf_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ return false;
+}
+
+void mve_buffer_dmabuf_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{}
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_valloc.c b/drivers/video/arm/v5xx/base/mve_buffer_valloc.c
new file mode 100644
index 000000000000..fa060c31fee3
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_valloc.c
@@ -0,0 +1,170 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/slab.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#endif
+
+#include "mve_mmu.h"
+#include "mve_buffer_valloc.h"
+#include "mve_driver.h"
+
+#include "mve_rsrc_driver.h"
+
+/* This is an implementation of the mve_buffer_client interface where the client
+ * buffers have been allocated using vmalloc.
+ */
+
+struct mve_buffer_external *mve_buffer_valloc_create_buffer_external(struct mve_buffer_info *info)
+{
+    /* The vmalloc implementation doesn't need to store any private data in
+     * the mve_buffer_external structure. Just create an instance and return it. */
+ struct mve_buffer_external *ret;
+
+ ret = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_buffer_external), GFP_KERNEL);
+ if (NULL != ret)
+ {
+ ret->info = *info;
+ }
+
+ return ret;
+}
+
+void mve_buffer_valloc_destroy_buffer_external(struct mve_buffer_external *buffer)
+{
+ MVE_RSRC_MEM_CACHE_FREE(buffer, sizeof(struct mve_buffer_external));
+}
+
+bool mve_buffer_valloc_map_physical_pages(struct mve_buffer_external *buffer)
+{
+ uint32_t size;
+ void *ptr;
+ uint32_t num_pages;
+ uint32_t offset;
+ phys_addr_t *pages;
+
+ buffer->mapping.pages = NULL;
+ buffer->mapping.num_pages = 0;
+
+ size = buffer->info.size;
+ ptr = (void *)(size_t)buffer->info.handle;
+ /* Calculate the number of needed pages */
+ offset = ((size_t)ptr) & (MVE_MMU_PAGE_SIZE - 1);
+ size += offset;
+ num_pages = (size + MVE_MMU_PAGE_SIZE - 1) / MVE_MMU_PAGE_SIZE;
+ /* Get the physical pages */
+ pages = mve_rsrc_mem_map_virt_to_phys(ptr - offset, size, buffer->mapping.write);
+ if (NULL == pages)
+ {
+ return false;
+ }
+
+ /* Register buffer as an external mapping */
+ buffer->mapping.pages = pages;
+ buffer->mapping.num_pages = num_pages;
+ /* If the buffer is not page aligned */
+ buffer->mapping.offset_in_page = offset;
+
+ return true;
+}
+
+bool mve_buffer_valloc_unmap_physical_pages(struct mve_buffer_external *buffer)
+{
+ if (NULL == buffer)
+ {
+ return false;
+ }
+
+    /* mve_rsrc_mem_unmap_virt_to_phys frees the memory allocated for the
+     * buffer->mapping.pages array. */
+ mve_rsrc_mem_unmap_virt_to_phys(buffer->mapping.pages, buffer->mapping.num_pages);
+
+ buffer->mapping.pages = NULL;
+ buffer->mapping.num_pages = 0;
+
+ return true;
+}
+
+void mve_buffer_valloc_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port)
+{
+ int i, len = 0;
+ void (*sync_func)(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction);
+ enum dma_data_direction direction;
+
+ if (NULL == buffer)
+ {
+ return;
+ }
+
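+    /* Input buffers are only read by the device, so no cache maintenance is
+     * needed when ownership returns to the CPU. */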
+ if (MVE_BUFFER_OWNER_CPU == owner && MVE_PORT_INDEX_INPUT == port)
+ {
+ return;
+ }
+
+ if (MVE_BUFFER_OWNER_CPU == owner)
+ {
+ sync_func = dma_sync_single_for_cpu;
+ }
+ else
+ {
+ sync_func = dma_sync_single_for_device;
+ }
+
+ if (MVE_PORT_INDEX_OUTPUT == port)
+ {
+ /* Data going from device to CPU */
+ direction = DMA_FROM_DEVICE;
+ }
+ else
+ {
+ /* Data going from CPU to device */
+ direction = DMA_TO_DEVICE;
+ }
+
+ for (i = 0; i < buffer->mapping.num_pages; ++i)
+ {
+ uint32_t offset;
+ uint32_t size;
+
+ if (i == 0)
+ {
+ offset = buffer->mapping.offset_in_page;
+ size = PAGE_SIZE - buffer->mapping.offset_in_page;
+ }
+ else if (i == buffer->mapping.num_pages - 1)
+ {
+ offset = 0;
+ size = buffer->info.size - len;
+ }
+ else
+ {
+ offset = 0;
+ size = PAGE_SIZE;
+ }
+
+ len += size;
+ /* Use the device struct from the resource module since this was used when
+ * mapping the physical pages */
+ sync_func(&mve_rsrc_data.pdev->dev, buffer->mapping.pages[i] + offset, size, direction);
+ }
+}
diff --git a/drivers/video/arm/v5xx/base/mve_buffer_valloc.h b/drivers/video/arm/v5xx/base/mve_buffer_valloc.h
new file mode 100644
index 000000000000..6f08663db5cd
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_buffer_valloc.h
@@ -0,0 +1,65 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_BUFFER_VALLOC_H
+#define MVE_BUFFER_VALLOC_H
+
+#include "mve_buffer.h"
+
+/**
+ * Allocate memory for private data and a mve_buffer_external instance. This is
+ * also the place to e.g. import the external buffer etc.
+ * @param info Buffer information.
+ * @return Pointer to the mve_buffer_external part of the allocated structure. NULL
+ * if no such structure could be allocated.
+ */
+struct mve_buffer_external *mve_buffer_valloc_create_buffer_external(struct mve_buffer_info *info);
+
+/**
+ * Free the memory allocated in mve_buffer_valloc_create_buffer_external.
+ * @param buffer Pointer to the mve_buffer_external instance to free.
+ */
+void mve_buffer_valloc_destroy_buffer_external(struct mve_buffer_external *buffer);
+
+/**
+ * Constructs an array of physical pages backing the user allocated buffer and
+ * stores the array in the supplied mve_buffer_external. Note that the pages
+ * must be mapped in the MVE MMU table before MVE can access the buffer. This
+ * is the responsibility of the client.
+ * @param buffer The buffer to map.
+ * @return True on success, false on failure.
+ */
+bool mve_buffer_valloc_map_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Hand back the pages to the user allocated buffer allocator. Note that the
+ * pages must also be unmapped from the MVE MMU table. This is the responsibility
+ * of the client.
+ * @param buffer The buffer to unmap.
+ * @return True on success, false on failure. This function fails if the
+ * supplied buffer is currently not registered.
+ */
+bool mve_buffer_valloc_unmap_physical_pages(struct mve_buffer_external *buffer);
+
+/**
+ * Set the owner of the buffer. This function takes care of cache flushing.
+ * @param buffer The buffer to change ownership of.
+ * @param owner The new owner of the buffer.
+ * @param port The port this buffer is connected to (input/output).
+ */
+void mve_buffer_valloc_set_owner(struct mve_buffer_external *buffer,
+ enum mve_buffer_owner owner,
+ enum mve_port_index port);
+
+#endif /* MVE_BUFFER_VALLOC_H */
diff --git a/drivers/video/arm/v5xx/base/mve_com.c b/drivers/video/arm/v5xx/base/mve_com.c
new file mode 100644
index 000000000000..4d1a93641d06
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_com.c
@@ -0,0 +1,230 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_com.h"
+#include "mve_session.h"
+#include "mve_rsrc_log.h"
+
+#include <host_interface_v1/mve_protocol_kernel.h>
+
+#include "mve_com_host_interface_v1.h"
+#include "mve_com_host_interface_v2.h"
+
+mve_base_error mve_com_add_message(struct mve_session *session,
+ uint16_t code,
+ uint16_t size,
+ uint32_t *data)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ if (session->com == NULL || session->com->host_interface.add_message == NULL)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ ret = session->com->host_interface.add_message(session, code, size, data);
+
+ if (MVE_BASE_ERROR_NONE != ret)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_fwif, MVE_LOG_ERROR, "%p mve_com_add_message() returned error status. ret=%u, code=%u, size=%u.", session, ret, code, size);
+ }
+
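+    /* Any message other than a SWITCH command implies activity, so take the
+     * session out of the idle state. */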
+ if (MVE_MESSAGE_CODE_SWITCH != code)
+ {
+ session->state.idle_state = IDLE_STATE_ACTIVE;
+ }
+
+ return ret;
+}
+
+uint32_t *mve_com_get_message(struct mve_session *session,
+ struct mve_msg_header *header)
+{
+ uint32_t *ret = NULL;
+
+ if (session->com == NULL || session->com->host_interface.get_message == NULL)
+ {
+ return NULL;
+ }
+
+ ret = session->com->host_interface.get_message(session, header);
+
+ return ret;
+}
+
+mve_base_error mve_com_add_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type)
+{
+ mve_base_error res;
+
+ if (session->com == NULL || session->com->host_interface.add_input_buffer == NULL)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ res = session->com->host_interface.add_input_buffer(session, buffer, type);
+
+ if (MVE_BASE_ERROR_NONE == res)
+ {
+ if (type != MVE_COM_BUFFER_TYPE_ROI)
+ {
+ session->input_buffer_count++;
+ }
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_fwif, MVE_LOG_ERROR, "%p mve_com_add_input_buffer() returned error status. ret=%u.", session, res);
+ }
+
+ return res;
+}
+
+mve_base_error mve_com_add_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type)
+{
+ mve_base_error res;
+
+ if (session->com == NULL || session->com->host_interface.add_output_buffer == NULL)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ res = session->com->host_interface.add_output_buffer(session, buffer, type);
+
+ if (MVE_BASE_ERROR_NONE == res)
+ {
+ if (type != MVE_COM_BUFFER_TYPE_ROI)
+ {
+ session->output_buffer_count++;
+ }
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_fwif, MVE_LOG_ERROR, "%p mve_com_add_output_buffer() returned error status. ret=%u.", session, res);
+ }
+
+ return res;
+}
+
+mve_base_error mve_com_get_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer)
+{
+ mve_base_error ret;
+
+ if (session->com == NULL || session->com->host_interface.get_input_buffer == NULL)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ ret = session->com->host_interface.get_input_buffer(session, buffer);
+
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ session->input_buffer_count--;
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_fwif, MVE_LOG_ERROR, "%p mve_com_get_input_buffer() returned error status. ret=%u.", session, ret);
+ }
+
+ return ret;
+}
+
+mve_base_error mve_com_get_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer)
+{
+ mve_base_error ret;
+
+ if (session->com == NULL || session->com->host_interface.get_output_buffer == NULL)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ ret = session->com->host_interface.get_output_buffer(session, buffer);
+
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ session->output_buffer_count--;
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_fwif, MVE_LOG_ERROR, "%p mve_com_get_output_buffer() returned error status. ret=%u.", session, ret);
+ }
+
+ return ret;
+}
+
+mve_base_error mve_com_get_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc)
+{
+ if (NULL == session->com || NULL == session->com->host_interface.get_rpc_message)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ return session->com->host_interface.get_rpc_message(session, rpc);
+}
+
+mve_base_error mve_com_put_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc)
+{
+ if (NULL == session->com || NULL == session->com->host_interface.put_rpc_message)
+ {
+ return MVE_BASE_ERROR_VERSION_MISMATCH;
+ }
+
+ return session->com->host_interface.put_rpc_message(session, rpc);
+}
+
+void mve_com_delete(struct mve_session *session)
+{
+ MVE_RSRC_MEM_FREE(session->com);
+ session->com = NULL;
+}
+
+mve_base_error mve_com_set_interface_version(struct mve_session *session,
+ enum mve_fw_protocol_version version)
+{
+ mve_com_delete(session);
+
+ switch (version)
+ {
+ case MVE_FW_PROTOCOL_VERSION_1_0:
+ {
+ session->com = mve_com_host_interface_v1_new();
+ break;
+ }
+
+ case MVE_FW_PROTOCOL_VERSION_2_0:
+ {
+ session->com = mve_com_host_interface_v2_new();
+ break;
+ }
+
+ default:
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_fwif, MVE_LOG_ERROR, "%p unsupported interface version configured. version=%u.", session, version);
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+ }
+
+ if (session->com == NULL)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ return MVE_BASE_ERROR_NONE;
+}
diff --git a/drivers/video/arm/v5xx/base/mve_com.h b/drivers/video/arm/v5xx/base/mve_com.h
new file mode 100644
index 000000000000..abefb372d17e
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_com.h
@@ -0,0 +1,313 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_COM_H
+#define MVE_COM_H
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+#include "mve_base.h"
+#include "mve_mem_region.h"
+#include "mve_fw.h"
+
+/* Additional message codes defined in HIv2 but not in HIv1 */
+#define MVE_MESSAGE_CODE_RELEASE_REF_FRAME 15
+#define MVE_RESPONSE_CODE_TRACE_BUFFERS 19
+
+/* Forward declarations */
+struct mve_session;
+struct mve_msg_header;
+
+/* The buffer representation (MVE_BUFFER) for the different host interface
+ * versions can differ a lot. The following structures are therefore used by
+ * the driver and converted to the correct format by the interface implementations.
+ */
+struct mve_com_buffer_planar
+{
+ int16_t stride[3]; /* Stride between rows for 0 and 180 deg rotation */
+ int16_t stride_90[3]; /* Stride between rows for 90 and 270 deg rotation */
+ uint32_t plane_top[3]; /* Y,Cb,Cr top field */
+ uint32_t plane_bot[3]; /* Y,Cb,Cr bottom field (interlace only) */
+};
+
+struct mve_com_buffer_afbc
+{
+ uint32_t plane_top; /* Top field (interlace) or frame (progressive) */
+ uint32_t plane_bot; /* Bottom field (interlace only) */
+ uint16_t cropx; /* Luma x crop */
+ uint16_t cropy; /* Luma y crop */
+ uint8_t y_offset; /* Deblocking y offset of picture */
+ uint8_t rangemap; /* Packed VC-1 Luma and Chroma range map coefs */
+ uint32_t alloc_bytes_top; /* Buffer size for top field (interlace) or frame (progressive) */
+ uint32_t alloc_bytes_bot; /* Buffer size for bottom field (interlace only) */
+ uint32_t afbc_alloc_bytes; /* AFBC buffer size requested by the firmware */
+ uint32_t afbc_width_in_superblocks[2]; /* Width in superblocks */
+};
+
+struct mve_com_buffer_frame
+{
+ uint16_t nHandle; /* Host buffer handle number / ** WARNING: struct mve_core_buffer_header relies on having the same nHandle position as MVE_BUFFER ** / */
+ enum mve_base_omx_buffer_format format; /* Buffer format. */
+ uint32_t pic_index; /* Picture index in decode order */
+ uint16_t decoded_height; /* Decoded height may be smaller than display height */
+ uint16_t decoded_width; /* Decoded width may be smaller than display width */
+ uint32_t nFlags; /* OMX BufferFlags */
+    uint32_t nMVEFlags; /* MVE sideband information */
+ uint32_t nStreamStartOffset; /* Start of picture stream byte offset */
+ uint32_t nStreamEndOffset; /* End of picture stream byte offset */
+ union
+ {
+ struct mve_com_buffer_planar planar;
+ struct mve_com_buffer_afbc afbc;
+ }
+ data;
+ /* Below fields are valid since Host interface spec v0.1 */
+ uint32_t crc_top; /* CRC map address top field or frame */
+ uint32_t crc_bot; /* CRC map bottom field */
+
+ uint64_t timestamp; /* Host supplied buffer timestamp */
+};
+
+struct mve_com_buffer_bitstream
+{
+ uint16_t nHandle; /* Host buffer handle number / ** WARNING: struct mve_core_buffer_header relies on having the same nHandle position as MVE_BUFFER ** / */
+ uint32_t pBufferData; /* Buffer start */
+ uint32_t nAllocLen; /* Length of allocated buffer */
+ uint32_t nFilledLen; /* Number of bytes in the buffer */
+ uint32_t nOffset; /* Byte offset from start to first byte */
+ uint32_t nFlags; /* OMX BufferFlags */
+ uint64_t timestamp; /* Host supplied buffer timestamp */
+};
+
+#define MAX_ROI_REGIONS 16
+struct mve_com_buffer_roi
+{
+ uint8_t nRegions;
+ struct mve_base_roi_region regions[MAX_ROI_REGIONS];
+};
+
+typedef union mve_com_buffer
+{
+ struct mve_com_buffer_frame frame;
+ struct mve_com_buffer_bitstream bitstream;
+ struct mve_com_buffer_roi roi;
+} mve_com_buffer;
+
+enum mve_com_buffer_type
+{
+ MVE_COM_BUFFER_TYPE_FRAME,
+ MVE_COM_BUFFER_TYPE_BITSTREAM,
+ MVE_COM_BUFFER_TYPE_ROI
+};
+
+#define MVE_COM_RPC_AREA_SIZE_IN_WORDS 256
+#define MVE_COM_RPC_DATA_SIZE_IN_WORDS (MVE_COM_RPC_AREA_SIZE_IN_WORDS - 3)
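+/* Three words are reserved for the state, call_id and size fields of
+ * struct mve_com_rpc below. */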
+union mve_com_rpc_params
+{
+ volatile uint32_t data[MVE_COM_RPC_DATA_SIZE_IN_WORDS];
+ struct
+ {
+ char string[MVE_COM_RPC_DATA_SIZE_IN_WORDS * 4];
+ }
+ debug_print;
+ struct
+ {
+ uint32_t size;
+ uint32_t max_size;
+ enum mve_base_memory_region region; /* Memory region selection */
+ /* The newly allocated memory must be placed
+ * on (at least) a 2^(log2_alignment) boundary
+ */
+ uint8_t log2_alignment;
+ }
+ mem_alloc;
+ struct
+ {
+ uint32_t ve_pointer;
+ uint32_t new_size;
+ }
+ mem_resize;
+ struct
+ {
+ uint32_t ve_pointer;
+ }
+ mem_free;
+};
+
+typedef struct mve_com_rpc
+{
+ volatile uint32_t state;
+ #define MVE_COM_RPC_STATE_FREE (0)
+ #define MVE_COM_RPC_STATE_PARAM (1)
+ #define MVE_COM_RPC_STATE_RETURN (2)
+ volatile uint32_t call_id;
+ #define MVE_COM_RPC_FUNCTION_DEBUG_PRINTF (1)
+ #define MVE_COM_RPC_FUNCTION_MEM_ALLOC (2)
+ #define MVE_COM_RPC_FUNCTION_MEM_RESIZE (3)
+ #define MVE_COM_RPC_FUNCTION_MEM_FREE (4)
+ volatile uint32_t size;
+ union mve_com_rpc_params params;
+} mve_com_rpc;
+
+struct mve_com_notify_release_ref_frame
+{
+ uint32_t mve_buffer_addr;
+};
+
+struct mve_com_trace_buffers
+{
+ uint16_t reserved;
+ uint8_t num_cores;
+ uint8_t rasc_mask;
+#define MVE_MAX_TRACE_BUFFERS 40
+    /* This array contains one buffer per RASC in rasc_mask for each core in num_cores */
+ struct
+ {
+ uint32_t rasc_addr; /* rasc address of the buffer */
+ uint32_t size; /* size of the buffer in bytes */
+ }
+ buffers[MVE_MAX_TRACE_BUFFERS];
+};
+
+/**
+ * Host interface function pointers.
+ */
+struct mve_com_host_interface
+{
+ mve_base_error (*add_message)(struct mve_session *session, uint16_t code, uint16_t size, uint32_t *data);
+ uint32_t *(*get_message)(struct mve_session *session, struct mve_msg_header *header);
+ mve_base_error (*add_input_buffer)(struct mve_session *session, mve_com_buffer *buffer, enum mve_com_buffer_type type);
+ mve_base_error (*add_output_buffer)(struct mve_session *session, mve_com_buffer *buffer, enum mve_com_buffer_type type);
+ mve_base_error (*get_input_buffer)(struct mve_session *session, mve_com_buffer *buffer);
+ mve_base_error (*get_output_buffer)(struct mve_session *session, mve_com_buffer *buffer);
+ mve_base_error (*get_rpc_message)(struct mve_session *session, mve_com_rpc *rpc);
+ mve_base_error (*put_rpc_message)(struct mve_session *session, mve_com_rpc *rpc);
+};
+
+/**
+ * Host interface base class.
+ */
+struct mve_com
+{
+ struct mve_com_host_interface host_interface;
+};
+
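+/*
+ * Typical call sequence (sketch only; error handling and message payload
+ * handling are omitted):
+ *
+ *   err = mve_com_set_interface_version(session, version);
+ *   err = mve_com_add_message(session, code, size, data);
+ *   ...
+ *   data = mve_com_get_message(session, &header); // caller frees the data
+ *   mve_com_delete(session);
+ */
+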
+/**
+ * Factory function. Sets the interface version and creates the com class instance.
+ *
+ * @param session Pointer to session object.
+ * @param version Protocol version.
+ * @return MVE_BASE_ERROR_NONE on success, else an error code.
+ */
+mve_base_error mve_com_set_interface_version(struct mve_session *session,
+ enum mve_fw_protocol_version version);
+
+/**
+ * Delete com object.
+ *
+ * @param session Pointer to session object.
+ */
+void mve_com_delete(struct mve_session *session);
+
+/**
+ * Enqueues a message to the MVE in-queue. Note that this function does not
+ * notify MVE that a message has been added.
+ * @param session Pointer to the session which wants to send a message
+ * @param code Message code.
+ * @param size Size of the attached data.
+ * @param data Pointer to the message data.
+ * @return MVE_BASE_ERROR_NONE if successful, otherwise a suitable error code.
+ */
+mve_base_error mve_com_add_message(struct mve_session *session,
+ uint16_t code,
+ uint16_t size,
+ uint32_t *data);
+
+/**
+ * Retrieves a message sent from the MVE to the host. Note that the client
+ * must free the returned data to avoid memory leaks.
+ * @param session Pointer to the session the message was sent to.
+ * @param header Pointer to a data area which will receive the message header.
+ * @return The data associated with the message. NULL on failure. This pointer must
+ *        be freed by the client.
+ */
+uint32_t *mve_com_get_message(struct mve_session *session,
+ struct mve_msg_header *header);
+
+/**
+ * Enqueues an input buffer to the MVE in buffer queue. Note that this function
+ * does not notify MVE that a buffer has been added.
+ * @param session Pointer to the session which wants to add an input buffer.
+ * @param buffer Pointer to the MVE buffer to add.
+ * @param type Buffer type.
+ * @return MVE_BASE_ERROR_NONE if the buffer was added successfully. Error code otherwise.
+ */
+mve_base_error mve_com_add_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type);
+
+/**
+ * Enqueues an output buffer to the MVE out buffer queue. Note that this function
+ * does not notify MVE that a buffer has been added and does not clean the CPU
+ * cache.
+ * @param session Pointer to the session which wants to add an output buffer.
+ * @param buffer Pointer to the MVE buffer to add to the queue.
+ * @param type Buffer type.
+ * @return MVE_BASE_ERROR_NONE if the buffer was added successfully. Error code otherwise.
+ */
+mve_base_error mve_com_add_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type);
+
+/**
+ * Retrieves an input buffer that the MVE has returned to the host.
+ * @param session Pointer to the session to retrieve the input buffer.
+ * @param buffer The returned buffer will be copied into this buffer.
+ * @return MVE_BASE_ERROR_NONE if buffer contains a valid buffer. Error code otherwise.
+ */
+mve_base_error mve_com_get_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer);
+
+/**
+ * Retrieves an output buffer that the MVE has returned to the host.
+ * @param session Pointer to the session to retrieve the output buffer.
+ * @param buffer The returned buffer will be copied into this buffer.
+ * @return MVE_BASE_ERROR_NONE if buffer contains a valid buffer. Error code otherwise.
+ */
+mve_base_error mve_com_get_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer);
+
+/**
+ * Retrieves an RPC request from MVE.
+ * @param session Pointer to the session to retrieve the RPC for.
+ * @param rpc The details about the RPC call will be placed in this instance.
+ * @return MVE_BASE_ERROR_NONE if rpc contains valid data. Error code otherwise.
+ */
+mve_base_error mve_com_get_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc);
+
+/**
+ * Stores the response to an RPC request from MVE.
+ * @param session Pointer to the session that received the RPC.
+ * @param rpc The RPC response to write to MVE.
+ * @return MVE_BASE_ERROR_NONE if rpc contains valid data. Error code otherwise.
+ */
+mve_base_error mve_com_put_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc);
+
+#endif /* MVE_COM_H */
diff --git a/drivers/video/arm/v5xx/base/mve_com_host_interface_v1.c b/drivers/video/arm/v5xx/base/mve_com_host_interface_v1.c
new file mode 100644
index 000000000000..19e6d47efa91
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_com_host_interface_v1.c
@@ -0,0 +1,821 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/delay.h>
+#endif
+
+#include "mve_session.h"
+#include "mve_com.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_log_ram.h"
+
+#include <host_interface_v1/mve_protocol_kernel.h>
+
+/**
+ * Write data at offset in the buffer pointed to by ptr.
+ * The type of the data is specified by the argument type.
+ */
+#define WRITE_QUEUE(ptr, type, offset, data) \
+ *((type *)&((ptr)[(offset) % MVE_COMM_QUEUE_SIZE_IN_WORDS])) = data
+
+/**
+ * Read data at offset in the buffer pointed to by src.
+ * The type of data is specified by the type argument.
+ */
+#define READ_QUEUE(src, type, offset, dst) \
+ dst = *((type *)&((src)[(offset) % MVE_COMM_QUEUE_SIZE_IN_WORDS]))
+
+/**
+ * Round v up to the nearest multiple of r. r must be a power of 2.
+ */
+#define ROUNDUP(v, r) (((v) + (r) - 1) & ~((r) - 1))
+
+/**
+ * Convert from driver buffer structure to FW buffer structure.
+ */
+static void convert_to_mve_buffer(struct mve_session *session,
+ mve_com_buffer *src,
+ union MVE_BUFFER *dst,
+ enum mve_com_buffer_type type)
+{
+ int i;
+
+ memset(dst, 0, sizeof(union MVE_BUFFER));
+
+ switch (type)
+ {
+ case MVE_COM_BUFFER_TYPE_FRAME:
+ dst->frame.nHandle = src->frame.nHandle;
+ dst->frame.nFlags = src->frame.nFlags;
+ dst->frame.nUserDataTag = src->frame.timestamp;
+ dst->frame.nMVEFlags = src->frame.nMVEFlags;
+ dst->frame.pic_index = src->frame.pic_index;
+ dst->frame.decoded_height = src->frame.decoded_height;
+ dst->frame.decoded_width = src->frame.decoded_width;
+ memcpy(&dst->frame.data, &src->frame.data, sizeof(dst->frame.data));
+ dst->frame.crc_top = src->frame.crc_top;
+ dst->frame.crc_bot = src->frame.crc_bot;
+
+ if ((dst->frame.nMVEFlags & (MVE_FLAGS_TOP_PRESENT | MVE_FLAGS_BOT_PRESENT)) == 0)
+ {
+ dst->frame.decoded_height = 0;
+ dst->frame.decoded_width = 0;
+ }
+ break;
+ case MVE_COM_BUFFER_TYPE_BITSTREAM:
+ dst->bitstream.nHandle = src->bitstream.nHandle;
+ dst->bitstream.nFlags = src->bitstream.nFlags;
+ dst->bitstream.nUserDataTag = src->bitstream.timestamp;
+ dst->bitstream.nAllocLen = src->bitstream.nAllocLen;
+ dst->bitstream.nOffset = src->bitstream.nOffset;
+ dst->bitstream.nFilledLen = src->bitstream.nFilledLen;
+ dst->bitstream.pBufferData = src->bitstream.pBufferData;
+ break;
+ case MVE_COM_BUFFER_TYPE_ROI:
+ dst->region.nRegions = src->roi.nRegions;
+ for (i = 0; i < src->roi.nRegions; ++i)
+ {
+ dst->region.region[i].mbx_left = src->roi.regions[i].mbx_left;
+ dst->region.region[i].mbx_right = src->roi.regions[i].mbx_right;
+ dst->region.region[i].mby_bottom = src->roi.regions[i].mby_bottom;
+ dst->region.region[i].mby_top = src->roi.regions[i].mby_top;
+ dst->region.region[i].qp_delta = (int8_t)src->roi.regions[i].qp_delta;
+ }
+ break;
+ }
+}
+
+/**
+ * Convert from FW buffer structure to driver buffer structure.
+ */
+static void convert_from_mve_buffer(struct mve_session *session,
+ union MVE_BUFFER *src,
+ mve_com_buffer *dst,
+ enum mve_com_buffer_type type)
+{
+ switch (type)
+ {
+ case MVE_COM_BUFFER_TYPE_FRAME:
+ dst->frame.nHandle = src->frame.nHandle;
+ dst->frame.nFlags = src->frame.nFlags;
+ dst->frame.timestamp = src->frame.nUserDataTag;
+ dst->frame.nMVEFlags = src->frame.nMVEFlags;
+ dst->frame.pic_index = src->frame.pic_index;
+ dst->frame.decoded_height = src->frame.decoded_height;
+ dst->frame.decoded_width = src->frame.decoded_width;
+ memcpy(&dst->frame.data, &src->frame.data, sizeof(src->frame.data));
+ dst->frame.crc_top = src->frame.crc_top;
+ dst->frame.crc_bot = src->frame.crc_bot;
+ break;
+ case MVE_COM_BUFFER_TYPE_BITSTREAM:
+ dst->bitstream.nHandle = src->bitstream.nHandle;
+ dst->bitstream.nFlags = src->bitstream.nFlags;
+ dst->bitstream.timestamp = src->bitstream.nUserDataTag;
+ dst->bitstream.nAllocLen = src->bitstream.nAllocLen;
+ dst->bitstream.nOffset = src->bitstream.nOffset;
+ dst->bitstream.nFilledLen = src->bitstream.nFilledLen;
+ dst->bitstream.pBufferData = src->bitstream.pBufferData;
+ break;
+ default:
+ WARN_ON(true); /* Should never end up here */
+ break;
+ }
+}
+
+/**
+ * Waits until there is enough space available in the message in-queue to
+ * add a message of a given size. If a static timeout expires, this function
+ * returns false.
+ * @param rpos Pointer to read position in the queue.
+ * @param wpos Pointer to write position in the queue.
+ * @param words Number of data slots required in the message queue.
+ * @param queue_size Size of the queue in 32-bit words.
+ * @return MVE_BASE_ERROR_NONE if there is enough space available in the queue to add
+ * a message of the given size. MVE_BASE_ERROR_TIMEOUT if the timeout triggered.
+ */
+static mve_base_error wait_until_space_available(volatile uint16_t *rpos,
+ volatile uint16_t *wpos,
+ uint32_t words,
+ uint32_t queue_size)
+{
+#define ITERATION_TIMEOUT 100
+ int i = 0;
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ uint32_t free_words;
+
+ if (words > queue_size)
+ {
+ return MVE_BASE_ERROR_TIMEOUT;
+ }
+
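+    /* Poll until the firmware has consumed enough entries. One extra slot
+     * (words + 1) is required so a full queue can be told apart from an
+     * empty one. */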
+ do
+ {
+ if (*rpos <= *wpos)
+ {
+ free_words = queue_size - *wpos + *rpos;
+ }
+ else
+ {
+ free_words = *rpos - *wpos;
+ }
+ i++;
+ mb();
+ }
+ while (free_words < (words + 1) && i < ITERATION_TIMEOUT);
+
+ /* Did we timeout? */
+ if (i >= ITERATION_TIMEOUT)
+ {
+ ret = MVE_BASE_ERROR_TIMEOUT;
+ }
+
+ return ret;
+#undef ITERATION_TIMEOUT
+}
+
+/**
+ * Write data the input queue represented by host_area.
+ * @param host_area The host area of the queue.
+ * @param mve_area The MVE area of the queue.
+ * @param header Header of the message to add.
+ * @param data Data section of the message to add.
+ * @param num_words Size of the data section in 32-bit words.
+ * @return MVE_BASE_ERROR_NONE if the message was added to the queue. Error code otherwise.
+ */
+static mve_base_error write_to_queue(struct mve_comm_area_host *host_area,
+ struct mve_comm_area_mve *mve_area,
+ struct mve_msg_header *header,
+ uint32_t *data,
+ uint32_t num_words)
+{
+ mve_base_error ret;
+ uint32_t i;
+ uint16_t wpos;
+
+ ret = wait_until_space_available(&mve_area->in_rpos,
+ &host_area->in_wpos,
+ 1 + num_words,
+ MVE_COMM_QUEUE_SIZE_IN_WORDS);
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ wpos = host_area->in_wpos;
+ WRITE_QUEUE(host_area->in_data, struct mve_msg_header, wpos, *header);
+ wpos = (wpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+ for (i = 0; i < num_words; ++i)
+ {
+ WRITE_QUEUE(host_area->in_data, uint32_t, wpos, data[i]);
+ wpos = (wpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+ }
+
+ mve_rsrc_mem_flush_write_buffer();
+ host_area->in_wpos = wpos;
+ mve_rsrc_mem_flush_write_buffer();
+ }
+
+ return ret;
+}
+
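+/**
+ * Read one message (header plus payload) from the out-queue and advance the
+ * host read position. The caller is expected to have verified that a message
+ * is available; the WARN_ON below only flags misuse.
+ * @param host_area The host area of the queue.
+ * @param mve_area The MVE area of the queue.
+ * @param header Receives the message header.
+ * @param dst Receives the message payload.
+ */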
+static void read_from_queue(struct mve_comm_area_host *host_area,
+ struct mve_comm_area_mve *mve_area,
+ struct mve_msg_header *header,
+ uint32_t *dst)
+{
+ uint16_t rpos;
+ uint32_t i, words;
+
+ WARN_ON((unsigned short)0 >= (unsigned short)(mve_area->out_wpos - host_area->out_rpos));
+ rpos = host_area->out_rpos;
+ READ_QUEUE(mve_area->out_data, struct mve_msg_header, rpos, *header);
+ rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+ words = (header->size + sizeof(uint32_t) - 1) / sizeof(uint32_t);
+
+ for (i = 0; i < words; ++i)
+ {
+ READ_QUEUE(mve_area->out_data, uint32_t, rpos, dst[i]);
+ rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+ }
+
+ mve_rsrc_mem_flush_write_buffer();
+ host_area->out_rpos = rpos;
+ mve_rsrc_mem_flush_write_buffer();
+}
+
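+/**
+ * Fill in the common log header and emit the log entry. Entries 0 and 1 of
+ * vec are reserved for the log header and the firmware interface descriptor;
+ * the caller supplies the remaining entries.
+ */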
+static void log_header(struct iovec *vec,
+ unsigned count,
+ struct mve_session *session,
+ enum mve_log_fwif_channel channel,
+ enum mve_log_fwif_direction direction)
+{
+ struct mve_log_header header;
+ struct mve_log_fwif fwif;
+ struct timespec timespec;
+ unsigned i;
+
+ getnstimeofday(&timespec);
+
+ header.magic = MVE_LOG_MAGIC;
+ header.length = 0;
+ header.type = MVE_LOG_TYPE_FWIF;
+ header.severity = MVE_LOG_INFO;
+ header.timestamp.sec = timespec.tv_sec;
+ header.timestamp.nsec = timespec.tv_nsec;
+
+ fwif.version_minor = 0;
+ fwif.version_major = 1;
+ fwif.channel = channel;
+ fwif.direction = direction;
+ fwif.session = (uintptr_t)session;
+
+ vec[0].iov_base = &header;
+ vec[0].iov_len = sizeof(header);
+
+ vec[1].iov_base = &fwif;
+ vec[1].iov_len = sizeof(fwif);
+
+ for (i = 1; i < count; ++i)
+ {
+ header.length += vec[i].iov_len;
+ }
+
+ MVE_LOG_DATA(&mve_rsrc_log_fwif, MVE_LOG_INFO, vec, count);
+}
+
+static void log_buffer(struct mve_session *session,
+ enum mve_log_fwif_channel channel,
+ enum mve_log_fwif_direction direction,
+ struct mve_msg_header *msg_header,
+ void *data,
+ unsigned queued)
+{
+ struct
+ {
+ struct mve_msg_header msg_header;
+ struct mve_log_fwif_stat stat;
+ }
+ stat;
+ struct iovec vec[5];
+
+ stat.msg_header.code = MVE_LOG_FWIF_CODE_STAT;
+ stat.msg_header.size = sizeof(stat.stat);
+ stat.stat.handle = 0;
+ stat.stat.queued = queued;
+
+ vec[2].iov_base = msg_header;
+ vec[2].iov_len = sizeof(*msg_header);
+
+ vec[3].iov_base = data;
+ vec[3].iov_len = ROUNDUP(msg_header->size, 4);
+
+ vec[4].iov_base = &stat;
+ vec[4].iov_len = ROUNDUP(sizeof(stat), 4);
+
+ log_header(vec, 5, session, channel, direction);
+}
+
+static void log_message(struct mve_session *session,
+ enum mve_log_fwif_direction direction,
+ struct mve_msg_header *msg_header,
+ void *data)
+{
+ struct iovec vec[4];
+
+ vec[2].iov_base = msg_header;
+ vec[2].iov_len = sizeof(*msg_header);
+
+ vec[3].iov_base = data;
+ vec[3].iov_len = msg_header->size;
+
+ log_header(vec, 4, session, MVE_LOG_FWIF_CHANNEL_MESSAGE, direction);
+}
+
+static mve_base_error add_message(struct mve_session *session,
+ uint16_t code,
+ uint16_t size,
+ uint32_t *data)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ if (0 < size && NULL == data)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ host_area = mve_rsrc_dma_mem_map(session->msg_in_queue);
+ mve_area = mve_rsrc_dma_mem_map(session->msg_out_queue);
+
+ if (NULL == host_area || NULL == mve_area)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+ else
+ {
+ uint32_t words;
+ struct mve_msg_header header;
+
+ header.code = code;
+ header.size = size;
+
+ words = (size + sizeof(uint32_t) - 1) / sizeof(uint32_t);
+ ret = write_to_queue(host_area, mve_area, &header, data, words);
+
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_message(session, MVE_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE, &header, data));
+ }
+
+ switch (code)
+ {
+ case MVE_MESSAGE_CODE_GO:
+ case MVE_MESSAGE_CODE_STOP:
+ case MVE_MESSAGE_CODE_SET_PARAMETER:
+ case MVE_MESSAGE_CODE_GET_PARAMETER:
+ case MVE_MESSAGE_CODE_SET_CONFIG:
+ case MVE_MESSAGE_CODE_GET_CONFIG:
+ case MVE_MESSAGE_CODE_INPUT_FLUSH:
+ case MVE_MESSAGE_CODE_OUTPUT_FLUSH:
+ session->pending_response_count++;
+ break;
+ default:
+ break;
+ }
+
+ mve_rsrc_dma_mem_unmap(session->msg_out_queue);
+ mve_rsrc_dma_mem_unmap(session->msg_in_queue);
+
+ return ret;
+}
+
+static uint32_t *get_message(struct mve_session *session,
+ struct mve_msg_header *header)
+{
+ uint32_t *ret = NULL;
+ uint32_t available_words;
+
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ if (NULL == header)
+ {
+ return NULL;
+ }
+
+ host_area = mve_rsrc_dma_mem_map(session->msg_in_queue);
+ mve_area = mve_rsrc_dma_mem_map(session->msg_out_queue);
+
+ mve_rsrc_dma_mem_invalidate_cache(session->msg_out_queue);
+
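+    /* Compute the number of words currently queued, taking wrap-around of
+     * the circular buffer into account. */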
+ if (host_area->out_rpos <= mve_area->out_wpos)
+ {
+ available_words = mve_area->out_wpos - host_area->out_rpos;
+ }
+ else
+ {
+ available_words = MVE_COMM_QUEUE_SIZE_IN_WORDS + mve_area->out_wpos - host_area->out_rpos;
+ }
+
+ if (available_words > 0)
+ {
+ /* There is a message in the queue */
+ uint16_t rpos;
+ uint32_t words;
+
+ rpos = host_area->out_rpos;
+
+ *header = *((struct mve_msg_header *)&mve_area->out_data[rpos]);
+ rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+
+ words = (header->size + sizeof(uint32_t) - 1) / sizeof(uint32_t);
+
+ ret = MVE_RSRC_MEM_CACHE_ALLOC(words * sizeof(uint32_t), GFP_KERNEL);
+ if (NULL == ret)
+ {
+ /* Failed to allocate temporary memory needed to process this
+ * message. This message will be dropped! */
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_WARNING, session, "Message from MVE dropped due to out of memory conditions.");
+            /* Update rpos to skip this message, wrapping around the
+             * circular queue if necessary */
+            rpos = (rpos + words) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+ }
+ else
+ {
+ int i;
+
+ /* Read the data associated with the message */
+ for (i = 0; i < words; ++i)
+ {
+ ret[i] = mve_area->out_data[rpos];
+ rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+ }
+
+ mb();
+
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_message(session, MVE_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST, header, ret));
+ }
+
+ host_area->out_rpos = rpos;
+ mb();
+
+ switch (header->code)
+ {
+ case MVE_RESPONSE_CODE_STATE_CHANGE:
+ case MVE_RESPONSE_CODE_GET_PARAMETER_REPLY:
+ case MVE_RESPONSE_CODE_SET_PARAMETER_REPLY:
+ case MVE_RESPONSE_CODE_GET_CONFIG_REPLY:
+ case MVE_RESPONSE_CODE_SET_CONFIG_REPLY:
+ case MVE_RESPONSE_CODE_INPUT_FLUSHED:
+ case MVE_RESPONSE_CODE_OUTPUT_FLUSHED:
+ session->pending_response_count--;
+ break;
+ default:
+ break;
+ }
+
+ mve_rsrc_dma_mem_clean_cache(session->msg_in_queue);
+ mve_rsrc_dma_mem_invalidate_cache(session->msg_out_queue);
+ }
+
+ mve_rsrc_dma_mem_unmap(session->msg_out_queue);
+ mve_rsrc_dma_mem_unmap(session->msg_in_queue);
+
+ return ret;
+}
+
+/**
+ * Send a buffer message to the FW.
+ */
+static mve_base_error send_buffer_msg(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type,
+ struct mve_comm_area_host *host_area,
+ struct mve_comm_area_mve *mve_area,
+ union MVE_BUFFER *mve_buffer,
+ struct mve_msg_header *header)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ switch (type)
+ {
+ case MVE_COM_BUFFER_TYPE_BITSTREAM:
+ header->code = MVE_MSG_HEADER_CODE_BUFFER_BITSTREAM;
+ header->size = sizeof(struct MVE_BUFFER_BITSTREAM);
+ break;
+
+ case MVE_COM_BUFFER_TYPE_FRAME:
+ header->code = MVE_MSG_HEADER_CODE_BUFFER_FRAME;
+ header->size = sizeof(struct MVE_BUFFER_FRAME);
+ break;
+
+ case MVE_COM_BUFFER_TYPE_ROI:
+ header->code = MVE_MSG_HEADER_CODE_BUFFER_REGION;
+ header->size = sizeof(struct MVE_BUFFER_REGION);
+ break;
+
+ default:
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unknown type (%d).", type);
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ uint32_t words;
+ uint32_t *data;
+
+ convert_to_mve_buffer(session, buffer, mve_buffer, type);
+
+ words = (header->size + sizeof(uint32_t) - 1) / sizeof(uint32_t);
+ data = (uint32_t *)mve_buffer;
+ ret = write_to_queue(host_area, mve_area, header, data, words);
+ }
+
+ return ret;
+}
+
+static mve_base_error add_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ host_area = mve_rsrc_dma_mem_map(session->buf_input_in);
+ mve_area = mve_rsrc_dma_mem_map(session->buf_input_out);
+
+ if (NULL == host_area || NULL == mve_area || NULL == buffer)
+ {
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+ else
+ {
+ union MVE_BUFFER mve_buffer;
+ struct mve_msg_header header;
+
+ ret = send_buffer_msg(session, buffer, type, host_area, mve_area, &mve_buffer, &header);
+
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_buffer(session,
+ MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER,
+ MVE_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE,
+ &header,
+ &mve_buffer,
+ session->input_buffer_count + 1));
+ }
+ }
+
+ mve_rsrc_dma_mem_unmap(session->buf_input_in);
+ mve_rsrc_dma_mem_unmap(session->buf_input_out);
+
+ return ret;
+}
+
+static mve_base_error add_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ host_area = mve_rsrc_dma_mem_map(session->buf_output_in);
+ mve_area = mve_rsrc_dma_mem_map(session->buf_output_out);
+
+ if (NULL == host_area || NULL == mve_area || NULL == buffer)
+ {
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+ else
+ {
+ union MVE_BUFFER mve_buffer;
+ struct mve_msg_header header;
+
+ ret = send_buffer_msg(session, buffer, type, host_area, mve_area, &mve_buffer, &header);
+
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_buffer(session,
+ MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER,
+ MVE_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE,
+ &header,
+ &mve_buffer,
+ session->output_buffer_count + 1));
+ }
+ }
+
+ mve_rsrc_dma_mem_unmap(session->buf_output_in);
+ mve_rsrc_dma_mem_unmap(session->buf_output_out);
+
+ return ret;
+}
+
+static mve_base_error get_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer)
+{
+ mve_base_error ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ host_area = mve_rsrc_dma_mem_map(session->buf_input_in);
+ mve_area = mve_rsrc_dma_mem_map(session->buf_input_out);
+ if (NULL != host_area && NULL != mve_area)
+ {
+ union MVE_BUFFER mve_buffer;
+ struct mve_msg_header header;
+ enum mve_com_buffer_type type;
+
+ mve_rsrc_dma_mem_invalidate_cache(session->buf_input_out);
+ read_from_queue(host_area, mve_area, &header, (uint32_t *)&mve_buffer);
+ mve_rsrc_dma_mem_clean_cache(session->buf_input_in);
+
+ type = (MVE_SESSION_TYPE_DECODER == session->session_type) ?
+ MVE_COM_BUFFER_TYPE_BITSTREAM :
+ MVE_COM_BUFFER_TYPE_FRAME;
+ convert_from_mve_buffer(session, &mve_buffer, buffer, type);
+ ret = MVE_BASE_ERROR_NONE;
+
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_buffer(session, MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER, MVE_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST, &header, &mve_buffer, session->input_buffer_count - 1));
+ }
+
+ mve_rsrc_dma_mem_unmap(session->buf_input_in);
+ mve_rsrc_dma_mem_unmap(session->buf_input_out);
+
+ return ret;
+}
+
+static mve_base_error get_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer)
+{
+ mve_base_error ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ host_area = mve_rsrc_dma_mem_map(session->buf_output_in);
+ mve_area = mve_rsrc_dma_mem_map(session->buf_output_out);
+ if (NULL != host_area && NULL != mve_area)
+ {
+ union MVE_BUFFER mve_buffer;
+ struct mve_msg_header header;
+ enum mve_com_buffer_type type;
+
+ mve_rsrc_dma_mem_invalidate_cache(session->buf_output_out);
+ read_from_queue(host_area, mve_area, &header, (uint32_t *)&mve_buffer);
+ mve_rsrc_dma_mem_clean_cache(session->buf_output_in);
+
+ type = (MVE_SESSION_TYPE_DECODER == session->session_type) ?
+ MVE_COM_BUFFER_TYPE_FRAME :
+ MVE_COM_BUFFER_TYPE_BITSTREAM;
+ convert_from_mve_buffer(session, &mve_buffer, buffer, type);
+ ret = MVE_BASE_ERROR_NONE;
+
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_buffer(session, MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, MVE_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST, &header, &mve_buffer, session->output_buffer_count - 1));
+ }
+
+ mve_rsrc_dma_mem_unmap(session->buf_output_in);
+ mve_rsrc_dma_mem_unmap(session->buf_output_out);
+
+ return ret;
+}
+
+static mve_base_error get_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc)
+{
+ struct mve_rpc_comunication_area *rpc_area;
+ mve_base_error err = MVE_BASE_ERROR_NOT_READY;
+
+ rpc_area = mve_rsrc_dma_mem_map(session->rpc_area);
+ if (NULL == rpc_area)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ mve_rsrc_dma_mem_invalidate_cache(session->rpc_area);
+
+ if (MVE_RPC_STATE_PARAM == rpc_area->state)
+ {
+ /* Copy RPC details to the client supplied structure */
+ rpc->state = rpc_area->state;
+ rpc->call_id = rpc_area->call_id;
+ rpc->size = rpc_area->size;
+
+ switch (rpc_area->call_id)
+ {
+ case MVE_RPC_FUNCTION_DEBUG_PRINTF:
+ memcpy(rpc->params.debug_print.string,
+ rpc_area->params.debug_print.string,
+ MVE_RPC_DATA_SIZE_IN_WORDS * 4);
+ break;
+ case MVE_RPC_FUNCTION_MEM_ALLOC:
+ rpc->params.mem_alloc.size = rpc_area->params.mem_alloc.size;
+ rpc->params.mem_alloc.max_size = rpc_area->params.mem_alloc.max_size;
+ rpc->params.mem_alloc.region = rpc_area->params.mem_alloc.region;
+ rpc->params.mem_alloc.log2_alignment = MVE_MMU_PAGE_SHIFT;
+ break;
+ case MVE_RPC_FUNCTION_MEM_RESIZE:
+ rpc->params.mem_resize.ve_pointer = rpc_area->params.mem_resize.ve_pointer;
+ rpc->params.mem_resize.new_size = rpc_area->params.mem_resize.new_size;
+ break;
+ case MVE_RPC_FUNCTION_MEM_FREE:
+ rpc->params.mem_free.ve_pointer = rpc_area->params.mem_free.ve_pointer;
+ break;
+ }
+
+ err = MVE_BASE_ERROR_NONE;
+ }
+
+ mve_rsrc_dma_mem_unmap(session->rpc_area);
+
+ return err;
+}
+
+static mve_base_error put_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc)
+{
+ struct mve_rpc_comunication_area *rpc_area;
+
+ rpc_area = mve_rsrc_dma_mem_map(session->rpc_area);
+ if (NULL == rpc_area)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Copy RPC details from the client supplied structure to the shared RPC area */
+ switch (rpc->call_id)
+ {
+ case MVE_RPC_FUNCTION_DEBUG_PRINTF:
+ break;
+ case MVE_RPC_FUNCTION_MEM_ALLOC:
+ rpc_area->params.data[0] = rpc->params.data[0];
+ break;
+ case MVE_RPC_FUNCTION_MEM_RESIZE:
+ rpc_area->params.data[0] = rpc->params.data[0];
+ break;
+ case MVE_RPC_FUNCTION_MEM_FREE:
+ break;
+ }
+
+ rpc_area->call_id = rpc->call_id;
+ rpc_area->size = rpc->size;
+ wmb();
+ rpc_area->state = rpc->state;
+
+ wmb();
+ mve_rsrc_dma_mem_clean_cache(session->rpc_area);
+ mve_rsrc_dma_mem_unmap(session->rpc_area);
+
+ return MVE_BASE_ERROR_NONE;
+}
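+
+/*
+ * Note on ordering: call_id, size and the parameter words are written to the
+ * shared area before the state field, with a write barrier in between, so
+ * that the FW can never observe the new state paired with stale RPC
+ * arguments; the final wmb() and cache clean then push the completed record
+ * to memory before the area is unmapped.
+ */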
+
+static void mve_com_host_interface_v1_construct(struct mve_com *com)
+{
+ memset(com, 0, sizeof(*com));
+
+ com->host_interface.add_message = add_message;
+ com->host_interface.get_message = get_message;
+ com->host_interface.add_input_buffer = add_input_buffer;
+ com->host_interface.add_output_buffer = add_output_buffer;
+ com->host_interface.get_input_buffer = get_input_buffer;
+ com->host_interface.get_output_buffer = get_output_buffer;
+ com->host_interface.get_rpc_message = get_rpc_message;
+ com->host_interface.put_rpc_message = put_rpc_message;
+}
+
+struct mve_com *mve_com_host_interface_v1_new(void)
+{
+ struct mve_com *com;
+
+ /* Allocate com object. */
+ com = MVE_RSRC_MEM_ZALLOC(sizeof(*com), GFP_KERNEL);
+ if (com == NULL)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_WARNING, "Failed to allocate com object.");
+ return NULL;
+ }
+
+ /* Run constructor. */
+ mve_com_host_interface_v1_construct(com);
+
+ return com;
+}
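+
+/*
+ * Illustrative only (fw_protocol_major is a hypothetical variable, not part
+ * of this driver): a caller is expected to construct the com object that
+ * matches the host interface version of the loaded firmware and then reach
+ * the firmware exclusively through the function pointers, e.g.:
+ *
+ *   struct mve_com *com = (fw_protocol_major < 2) ?
+ *       mve_com_host_interface_v1_new() :
+ *       mve_com_host_interface_v2_new();
+ *   if (NULL != com)
+ *   {
+ *       com->host_interface.add_message(session, code, size, data);
+ *   }
+ */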
diff --git a/drivers/video/arm/v5xx/base/mve_com_host_interface_v1.h b/drivers/video/arm/v5xx/base/mve_com_host_interface_v1.h
new file mode 100644
index 000000000000..9f8ec183eb19
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_com_host_interface_v1.h
@@ -0,0 +1,20 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_COM_HOST_INTERFACE_V1_H
+#define MVE_COM_HOST_INTERFACE_V1_H
+
+struct mve_com *mve_com_host_interface_v1_new(void);
+
+#endif /* MVE_COM_HOST_INTERFACE_V1_H */
diff --git a/drivers/video/arm/v5xx/base/mve_com_host_interface_v2.c b/drivers/video/arm/v5xx/base/mve_com_host_interface_v2.c
new file mode 100644
index 000000000000..8814732e1f39
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_com_host_interface_v2.c
@@ -0,0 +1,1549 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#define MVE_FLAGS_SCALE_HALF 0x00000100 /* Frame is scaled by half */
+#define MVE_FLAGS_SCALE_QUARTER 0x00000200 /* Frame is scaled by quarter */
+#define MVE_FLAGS_AFBC_TILED 0x00001000 /* AFBC uses tiling for headers and blocks */
+#define MVE_FLAGS_AFBC_WIDEBLK 0x00002000 /* AFBC uses wide super block 32x8, default is 16x16 */
+#define MVE_BUFFER_FRAME_FLAG_SCALING_2 0x00000040
+#define MVE_BUFFER_FRAME_FLAG_SCALING_4 0x00000080
+
+/****************************************************************************
+ * Include v1 interface
+ ****************************************************************************/
+
+#define mve_comm_area_host mve_comm_area_host_v1
+#define mve_comm_area_mve mve_comm_area_mve_v1
+#define rpc_params rpc_params_v1
+#define mve_msg_header mve_msg_header_v1
+#define mve_trace_event mve_trace_event_v1
+
+#define MVE_RESPONSE_CODE_INPUT MVE_RESPONSE_CODE_INPUT_V1
+#define MVE_RESPONSE_CODE_OUTPUT MVE_RESPONSE_CODE_OUTPUT_V1
+#define MVE_RESPONSE_CODE_PROCESSED MVE_RESPONSE_CODE_PROCESSED_V1
+#define MVE_RESPONSE_CODE_EVENT MVE_RESPONSE_CODE_EVENT_V1
+#define MVE_RESPONSE_CODE_SWITCHED_OUT MVE_RESPONSE_CODE_SWITCHED_OUT_V1
+#define MVE_RESPONSE_CODE_SWITCHED_IN MVE_RESPONSE_CODE_SWITCHED_IN_V1
+#define MVE_RESPONSE_CODE_ERROR MVE_RESPONSE_CODE_ERROR_V1
+#define MVE_RESPONSE_CODE_PONG MVE_RESPONSE_CODE_PONG_V1
+#define MVE_RESPONSE_CODE_STATE_CHANGE MVE_RESPONSE_CODE_STATE_CHANGE_V1
+#define MVE_RESPONSE_CODE_GET_PARAMETER_REPLY MVE_RESPONSE_CODE_GET_PARAMETER_REPLY_V1
+#define MVE_RESPONSE_CODE_SET_PARAMETER_REPLY MVE_RESPONSE_CODE_SET_PARAMETER_REPLY_V1
+#define MVE_RESPONSE_CODE_GET_CONFIG_REPLY MVE_RESPONSE_CODE_GET_CONFIG_REPLY_V1
+#define MVE_RESPONSE_CODE_SET_CONFIG_REPLY MVE_RESPONSE_CODE_SET_CONFIG_REPLY_V1
+#define MVE_RESPONSE_CODE_INPUT_FLUSHED MVE_RESPONSE_CODE_INPUT_FLUSHED_V1
+#define MVE_RESPONSE_CODE_OUTPUT_FLUSHED MVE_RESPONSE_CODE_OUTPUT_FLUSHED_V1
+#define MVE_RESPONSE_CODE_DUMP MVE_RESPONSE_CODE_DUMP_V1
+#define MVE_RESPONSE_CODE_JOB_DEQUEUED MVE_RESPONSE_CODE_JOB_DEQUEUED_V1
+#define MVE_RESPONSE_CODE_IDLE MVE_RESPONSE_CODE_IDLE_V1
+
+#include "host_interface_v1/mve_protocol_kernel.h"
+
+#undef mve_comm_area_host
+#undef mve_comm_area_mve
+#undef rpc_params
+#undef mve_msg_header
+#undef mve_trace_event
+
+#undef MVE_RESPONSE_CODE_INPUT
+#undef MVE_RESPONSE_CODE_OUTPUT
+#undef MVE_RESPONSE_CODE_PROCESSED
+#undef MVE_RESPONSE_CODE_EVENT
+#undef MVE_RESPONSE_CODE_SWITCHED_OUT
+#undef MVE_RESPONSE_CODE_SWITCHED_IN
+#undef MVE_RESPONSE_CODE_ERROR
+#undef MVE_RESPONSE_CODE_PONG
+#undef MVE_RESPONSE_CODE_STATE_CHANGE
+#undef MVE_RESPONSE_CODE_GET_PARAMETER_REPLY
+#undef MVE_RESPONSE_CODE_SET_PARAMETER_REPLY
+#undef MVE_RESPONSE_CODE_GET_CONFIG_REPLY
+#undef MVE_RESPONSE_CODE_SET_CONFIG_REPLY
+#undef MVE_RESPONSE_CODE_INPUT_FLUSHED
+#undef MVE_RESPONSE_CODE_OUTPUT_FLUSHED
+#undef MVE_RESPONSE_CODE_DUMP
+#undef MVE_RESPONSE_CODE_JOB_DEQUEUED
+#undef MVE_RESPONSE_CODE_IDLE
+#undef MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE1_ADDR_END
+#undef MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE2_ADDR_END
+#undef MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE3_ADDR_END
+#undef MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE4_ADDR_END
+#undef MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE5_ADDR_END
+#undef MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE6_ADDR_END
+#undef MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN
+#undef MVE_MEM_REGION_FW_INSTANCE7_ADDR_END
+
+#define MVE_STATE_STOPPED_V1 MVE_STATE_STOPPED
+#undef MVE_STATE_STOPPED
+
+#define MVE_STATE_RUNNING_V1 MVE_STATE_RUNNING
+#undef MVE_STATE_RUNNING
+
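+/*
+ * The renames above allow both protocol headers to be included in a single
+ * translation unit: while mve_protocol_kernel.h is parsed, for example,
+ * struct mve_msg_header is seen as struct mve_msg_header_v1 and the v1
+ * response codes are captured under *_V1 names, after which the macros are
+ * removed again so that the v2 header below can define the unprefixed names.
+ */
+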
+/****************************************************************************
+ * Include v2 interface
+ ****************************************************************************/
+
+#include "host_interface_v2/mve_protocol_def.h"
+
+enum { MVE_EVENT_PROCESSED_v2 = MVE_EVENT_PROCESSED };
+#undef MVE_EVENT_PROCESSED
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/delay.h>
+#endif
+
+#include "mve_com_host_interface_v2.h"
+#include "mve_com.h"
+#include "mve_session.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_log_ram.h"
+#include "mve_rsrc_mem_dma.h"
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_register.h"
+
+/****************************************************************************
+ * Defines
+ ****************************************************************************/
+
+#define OMX_ErrorNone 0
+#define OMX_EventError 1
+#define OMX_ErrorStreamCorrupt 0x8000100b
+#define OMX_ErrorNotImplemented 0x80001006
+
+#define OMX_BUFFERFLAG_EOS 0x00000001
+#define OMX_BUFFERFLAG_STARTTIME 0x00000002
+#define OMX_BUFFERFLAG_DECODEONLY 0x00000004
+#define OMX_BUFFERFLAG_DATACORRUPT 0x00000008
+#define OMX_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define OMX_BUFFERFLAG_SYNCFRAME 0x00000020
+#define OMX_BUFFERFLAG_EXTRADATA 0x00000040
+#define OMX_BUFFERFLAG_CODECCONFIG 0x00000080
+#define OMX_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define OMX_BUFFERFLAG_READONLY 0x00000200
+#define OMX_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+#define OMX_BUFFERFLAG_SKIPFRAME 0x00000800
+
+/****************************************************************************
+ * Types
+ ****************************************************************************/
+
+/*
+ * Structure and defines for RGB to YUV conversion mode
+ */
+#define MALI_ENCODE_RGB_TO_YUV_MODE 0x7F000300
+typedef struct mali_encode_rgbyuv_conversionmode
+{
+ uint32_t nSize;
+ uint32_t nVersion;
+ uint32_t nPortIndex;
+ uint32_t nMode;
+} mali_encode_rgbyuv_conversionmode;
+
+struct mve_com_v2
+{
+ struct mve_com base;
+ struct mve_response_frame_alloc_parameters alloc_params;
+};
+
+struct mve_switched_in_v1
+{
+ uint32_t core;
+};
+
+struct mve_switched_out_v1
+{
+ uint32_t core;
+ uint32_t reason;
+ uint32_t sub_reason;
+};
+
+struct mve_set_reply_v1
+{
+ uint32_t index;
+ uint32_t return_code;
+};
+
+#define MVE_MAX_ERROR_MESSAGE_SIZE_V1 80
+struct mve_error_v1
+{
+ uint32_t reason;
+ char message[MVE_MAX_ERROR_MESSAGE_SIZE_V1];
+};
+
+struct mve_state_change_v1
+{
+ uint32_t new_state;
+};
+
+struct mve_event_v1
+{
+ uint32_t event;
+ uint32_t data1;
+ uint32_t data2;
+};
+
+/****************************************************************************
+ * Functions
+ ****************************************************************************/
+
+/**
+ * Return true if the port carries frames, false if it carries a bitstream.
+ */
+static bool is_frame(struct mve_session *session,
+ bool direction_input)
+{
+ return (session->session_type == MVE_SESSION_TYPE_ENCODER) ? direction_input : !direction_input;
+}
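+
+/*
+ * For example, a decoder session consumes a bitstream and produces frames,
+ * so is_frame(session, true) is false and is_frame(session, false) is true;
+ * an encoder session is the mirror image.
+ */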
+
+/**
+ * Round v up to the next multiple of r. r must be a power of 2.
+ */
+#define ROUNDUP(v, r) (((v) + (r) - 1) & ~((r) - 1))
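+
+/* For example: ROUNDUP(6, 4) == 8 and ROUNDUP(8, 4) == 8. */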
+
+/**
+ * Log message.
+ */
+static void log_message(struct mve_session *session,
+ enum mve_log_fwif_channel channel,
+ enum mve_log_fwif_direction direction,
+ struct mve_msg_header *msg_header,
+ void *data)
+{
+ struct mve_log_header header;
+ struct mve_log_fwif fwif;
+ struct iovec vec[5];
+ struct timespec timespec;
+ int vecs = 4;
+ int queued = -1;
+
+ struct
+ {
+ struct mve_msg_header msg_header;
+ struct mve_log_fwif_stat stat;
+ }
+ stat;
+
+ getnstimeofday(&timespec);
+
+ header.magic = MVE_LOG_MAGIC;
+ header.length = sizeof(fwif) + sizeof(*msg_header) + ROUNDUP(msg_header->size, 4);
+ header.type = MVE_LOG_TYPE_FWIF;
+ header.severity = MVE_LOG_INFO;
+ header.timestamp.sec = timespec.tv_sec;
+ header.timestamp.nsec = timespec.tv_nsec;
+
+ fwif.version_major = 2;
+ fwif.version_minor = 0;
+ fwif.channel = channel;
+ fwif.direction = direction;
+ fwif.session = (uintptr_t)session;
+
+ vec[0].iov_base = &header;
+ vec[0].iov_len = sizeof(header);
+
+ vec[1].iov_base = &fwif;
+ vec[1].iov_len = sizeof(fwif);
+
+ vec[2].iov_base = msg_header;
+ vec[2].iov_len = sizeof(*msg_header);
+
+ vec[3].iov_base = data;
+ vec[3].iov_len = ROUNDUP(msg_header->size, 4);
+
+ if (msg_header->code == MVE_BUFFER_CODE_FRAME || msg_header->code == MVE_BUFFER_CODE_BITSTREAM)
+ {
+ int change = direction == MVE_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE ? 1 : -1;
+ switch (channel)
+ {
+ case MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER:
+ queued = session->input_buffer_count + change;
+ break;
+ case MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER:
+ queued = session->output_buffer_count + change;
+ break;
+ default:
+ queued = -1;
+ }
+ }
+
+ if (queued != -1)
+ {
+ stat.msg_header.code = MVE_LOG_FWIF_CODE_STAT;
+ stat.msg_header.size = sizeof(stat.stat);
+ stat.stat.handle = 0;
+ stat.stat.queued = queued;
+
+ vec[4].iov_base = &stat;
+ vec[4].iov_len = ROUNDUP(sizeof(stat), 4);
+
+ header.length += vec[4].iov_len;
+ vecs = 5;
+ }
+
+ MVE_LOG_DATA(&mve_rsrc_log_fwif, MVE_LOG_INFO, vec, vecs);
+}
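+
+/*
+ * The RAM log record assembled above is laid out as
+ *
+ *   [mve_log_header][mve_log_fwif][mve_msg_header][payload, padded to 4 bytes]
+ *
+ * optionally followed by a statistics record (a second mve_msg_header plus an
+ * mve_log_fwif_stat) when the logged message is a frame or bitstream buffer.
+ */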
+
+static uint32_t read_32(struct mve_comm_area_mve *mve_area,
+ uint32_t *rpos)
+{
+ uint32_t value;
+
+ value = mve_area->out_data[*rpos];
+ *rpos = (*rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+
+ return value;
+}
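+
+/*
+ * read_32() advances the read index modulo the queue size, so with
+ * MVE_COMM_QUEUE_SIZE_IN_WORDS == N a read at rpos == N - 1 wraps the index
+ * back to 0; a message may therefore straddle the end of the circular buffer.
+ */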
+
+static mve_base_error read_from_queue_v2(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ struct mve_msg_header *msg_header,
+ void *data,
+ uint32_t size)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+ uint32_t available;
+ uint32_t rpos;
+ uint32_t *d = data;
+
+ host_area = mve_rsrc_dma_mem_map(host);
+ mve_area = mve_rsrc_dma_mem_map(mve);
+
+ if (host_area == NULL || mve_area == NULL)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto out;
+ }
+
+ mve_rsrc_dma_mem_invalidate_cache(mve);
+
+ /* Check that there is enough data for the header. */
+ if (host_area->out_rpos <= mve_area->out_wpos)
+ {
+ available = mve_area->out_wpos - host_area->out_rpos;
+ }
+ else
+ {
+ available = MVE_COMM_QUEUE_SIZE_IN_WORDS - (host_area->out_rpos - mve_area->out_wpos);
+ }
+
+ if (available == 0)
+ {
+ ret = MVE_BASE_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ rpos = host_area->out_rpos;
+
+ /* Read header. */
+ *(uint32_t *)msg_header = read_32(mve_area, &rpos);
+ available--;
+
+ /* Check that header size is not larger than available data. */
+ if (((msg_header->size + 3) / 4) > available)
+ {
+ ret = MVE_BASE_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ /* Check that the message size is not larger than the caller-provided size. */
+ if (msg_header->size > size)
+ {
+ ret = MVE_BASE_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ /* Read data. */
+ size = (msg_header->size + 3) / 4;
+ while (size-- > 0)
+ {
+ *d++ = read_32(mve_area, &rpos);
+ }
+
+ mve_rsrc_mem_flush_write_buffer();
+ /* Update read position. */
+ host_area->out_rpos = rpos;
+ mve_rsrc_mem_flush_write_buffer();
+
+ /* Update pending response counter. */
+ switch (msg_header->code)
+ {
+ case MVE_RESPONSE_CODE_SET_OPTION_CONFIRM:
+ case MVE_RESPONSE_CODE_SET_OPTION_FAIL:
+ case MVE_RESPONSE_CODE_INPUT_FLUSHED:
+ case MVE_RESPONSE_CODE_OUTPUT_FLUSHED:
+ case MVE_RESPONSE_CODE_STATE_CHANGE:
+ session->pending_response_count--;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Log firmware message. */
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_message(session, channel, MVE_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST, msg_header, data));
+
+out:
+ mve_rsrc_dma_mem_clean_cache(host);
+ mve_rsrc_dma_mem_unmap(host);
+ mve_rsrc_dma_mem_unmap(mve);
+
+ return ret;
+}
+
+static void write_32(struct mve_comm_area_host *host_area,
+ uint32_t *wpos,
+ uint32_t value)
+{
+ host_area->in_data[*wpos] = value;
+ *wpos = (*wpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
+}
+
+static mve_base_error write_to_queue_v2(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ uint32_t code,
+ uint32_t size,
+ void *data)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+ struct mve_msg_header msg_header;
+ uint32_t available;
+ uint32_t wpos;
+ uint32_t *d = data;
+
+ host_area = mve_rsrc_dma_mem_map(host);
+ mve_area = mve_rsrc_dma_mem_map(mve);
+
+ if ((host_area == NULL) || (mve_area == NULL))
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto out;
+ }
+
+ /* Set up header. */
+ msg_header.code = code;
+ msg_header.size = size;
+
+ /* Add size of header and calculate number of 32-bit words. */
+ size += sizeof(msg_header);
+ size = (size + 3) / 4;
+
+ /* Check that enough space is available in the buffer. */
+ if (mve_area->in_rpos <= host_area->in_wpos)
+ {
+ available = MVE_COMM_QUEUE_SIZE_IN_WORDS - (host_area->in_wpos - mve_area->in_rpos);
+ }
+ else
+ {
+ available = mve_area->in_rpos - host_area->in_wpos;
+ }
+
+ if (size > available)
+ {
+ ret = MVE_BASE_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ wpos = host_area->in_wpos;
+
+ /* Write header. */
+ write_32(host_area, &wpos, *((uint32_t *)&msg_header));
+ size--;
+
+ /* Write data. */
+ while (size-- > 0)
+ {
+ write_32(host_area, &wpos, *d++);
+ }
+
+ mve_rsrc_mem_flush_write_buffer();
+ /* Set write position. */
+ host_area->in_wpos = wpos;
+ mve_rsrc_mem_flush_write_buffer();
+
+ /* Update pending response counter. */
+ switch (code)
+ {
+ case MVE_REQUEST_CODE_GO:
+ case MVE_REQUEST_CODE_STOP:
+ case MVE_REQUEST_CODE_INPUT_FLUSH:
+ case MVE_REQUEST_CODE_OUTPUT_FLUSH:
+ case MVE_REQUEST_CODE_SET_OPTION:
+ session->pending_response_count++;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Log firmware message. */
+ MVE_LOG_EXECUTE(&mve_rsrc_log_fwif, MVE_LOG_INFO,
+ log_message(session, channel, MVE_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE, &msg_header, data));
+
+out:
+ mve_rsrc_dma_mem_unmap(host);
+ mve_rsrc_dma_mem_unmap(mve);
+
+ return ret;
+}
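+
+/*
+ * Free-space calculation above, illustrated with assumed positions (in
+ * 32-bit words, queue size N): with in_rpos == 10 and in_wpos == 4 the
+ * writer has wrapped and available == 10 - 4 == 6 words; with in_rpos == 4
+ * and in_wpos == 10, available == N - (10 - 4) == N - 6 words.
+ */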
+
+static uint32_t divide_by_power2_round_up(uint32_t n, uint32_t pow)
+{
+ return (n + (1 << pow) - 1) >> pow;
+}
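+
+/* For example: divide_by_power2_round_up(33, 5) == 2, since 33 / 32 rounds up to 2. */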
+
+static mve_base_error write_buffer_frame(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ struct mve_com_buffer_frame *frame)
+{
+ mve_base_error ret;
+ struct mve_buffer_frame mve_frame;
+ uint32_t rotation;
+
+ /* Convert com- to fw buffer. */
+ mve_frame.host_handle = frame->nHandle;
+ mve_frame.user_data_tag = frame->timestamp;
+
+ mve_frame.frame_flags = frame->nMVEFlags & (MVE_FLAGS_INTERLACE | MVE_BUFFER_FRAME_FLAG_BOT_FIRST | MVE_FLAGS_TOP_PRESENT | MVE_FLAGS_BOT_PRESENT);
+ mve_frame.frame_flags |= (frame->nFlags & OMX_BUFFERFLAG_EOS) ? MVE_BUFFER_FRAME_FLAG_EOS : 0;
+ mve_frame.frame_flags |= (frame->nFlags & OMX_BUFFERFLAG_DECODEONLY) ? MVE_BUFFER_FRAME_FLAG_DECODE_ONLY : 0;
+ mve_frame.frame_flags |= (frame->nFlags & OMX_BUFFERFLAG_DATACORRUPT) ? MVE_BUFFER_FRAME_FLAG_CORRUPT : 0;
+ mve_frame.frame_flags |= (frame->nFlags & OMX_BUFFERFLAG_READONLY) ? MVE_BUFFER_FRAME_FLAG_REF_FRAME : 0;
+ mve_frame.frame_flags |= (frame->nMVEFlags & MVE_FLAGS_SCALE_HALF) ? MVE_BUFFER_FRAME_FLAG_SCALING_2 : 0;
+ mve_frame.frame_flags |= (frame->nMVEFlags & MVE_FLAGS_SCALE_QUARTER) ? MVE_BUFFER_FRAME_FLAG_SCALING_4 : 0;
+
+ mve_frame.frame_flags |= (frame->nMVEFlags & MVE_FLAGS_ROTATION_MASK);
+
+ rotation = (frame->nMVEFlags & MVE_FLAGS_ROTATION_MASK) >> 4;
+
+ mve_frame.visible_frame_width = frame->decoded_width;
+ mve_frame.visible_frame_height = frame->decoded_height;
+
+ switch (frame->format)
+ {
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR:
+ mve_frame.format = MVE_FORMAT_YUV420_I420;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR:
+ mve_frame.format = MVE_FORMAT_YUV420_NV12;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUYYVY_10B:
+ mve_frame.format = MVE_FORMAT_YUV420_Y0L2;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC:
+ mve_frame.format = MVE_FORMAT_YUV420_AFBC_8;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B:
+ mve_frame.format = MVE_FORMAT_YUV420_AFBC_10;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV422_1P:
+ mve_frame.format = MVE_FORMAT_YUV422_YUY2;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU422_1P:
+ mve_frame.format = MVE_FORMAT_YUV422_UYVY;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YV12:
+ mve_frame.format = MVE_FORMAT_YUV420_I420;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR:
+ mve_frame.format = MVE_FORMAT_YUV420_NV21;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_RGBA_8888:
+ mve_frame.format = MVE_FORMAT_RGBA_8888;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_BGRA_8888:
+ mve_frame.format = MVE_FORMAT_BGRA_8888;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_ARGB_8888:
+ mve_frame.format = MVE_FORMAT_ARGB_8888;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_ABGR_8888:
+ mve_frame.format = MVE_FORMAT_ABGR_8888;
+ break;
+ default:
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Illegal com buffer format. format=%u", frame->format);
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ switch (frame->format)
+ {
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B:
+ {
+ mve_frame.data.afbc.plane[0] = frame->data.afbc.plane_top;
+ mve_frame.data.afbc.plane[1] = frame->data.afbc.plane_bot;
+ mve_frame.data.afbc.alloc_bytes[0] = frame->data.afbc.alloc_bytes_top;
+ mve_frame.data.afbc.alloc_bytes[1] = frame->data.afbc.alloc_bytes_bot;
+
+ if (0 == frame->data.afbc.afbc_width_in_superblocks[0])
+ {
+ #define LOG2_32 (5)
+ #define LOG2_16 (4)
+ uint32_t pow2 = (frame->nMVEFlags & MVE_FLAGS_AFBC_WIDEBLK) ? LOG2_32 : LOG2_16;
+ #undef LOG2_32
+ #undef LOG2_16
+ bool interlace = (frame->nMVEFlags & MVE_FLAGS_INTERLACE) ? true : false;
+
+ mve_frame.data.afbc.afbc_width_in_superblocks[0] = (false != interlace) ? 0 : divide_by_power2_round_up(mve_frame.visible_frame_width, pow2);
+ mve_frame.data.afbc.afbc_width_in_superblocks[1] = 0;
+ }
+ else
+ {
+ mve_frame.data.afbc.afbc_width_in_superblocks[0] = frame->data.afbc.afbc_width_in_superblocks[0];
+ mve_frame.data.afbc.afbc_width_in_superblocks[1] = frame->data.afbc.afbc_width_in_superblocks[1];
+ }
+
+ if (session->session_type == MVE_SESSION_TYPE_ENCODER)
+ {
+ mve_frame.data.afbc.cropx = 0;
+ mve_frame.data.afbc.cropy = 0;
+ }
+ else
+ {
+ mve_frame.data.afbc.cropx = frame->data.afbc.cropx;
+ mve_frame.data.afbc.cropy = frame->data.afbc.cropy + frame->data.afbc.y_offset;
+ }
+ mve_frame.data.afbc.afbc_params = 0;
+ if (frame->nMVEFlags & MVE_FLAGS_AFBC_TILED)
+ {
+ mve_frame.data.afbc.afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_BODY | MVE_BUFFER_FRAME_AFBC_TILED_HEADER;
+ }
+ if (frame->nMVEFlags & MVE_FLAGS_AFBC_WIDEBLK)
+ {
+ mve_frame.data.afbc.afbc_params |= MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK;
+ }
+ break;
+ }
+
+ default:
+ {
+ bool interlace = (frame->nMVEFlags & MVE_FLAGS_INTERLACE) ? true : false;
+
+ mve_frame.data.planar.plane_top[0] = frame->data.planar.plane_top[0];
+ mve_frame.data.planar.plane_top[1] = frame->data.planar.plane_top[1];
+ mve_frame.data.planar.plane_top[2] = frame->data.planar.plane_top[2];
+
+ mve_frame.data.planar.stride[0] = frame->data.planar.stride[0];
+ mve_frame.data.planar.stride[1] = frame->data.planar.stride[1];
+ mve_frame.data.planar.stride[2] = frame->data.planar.stride[2];
+
+ mve_frame.data.planar.max_frame_width = frame->decoded_width;
+ mve_frame.data.planar.max_frame_height = frame->decoded_height;
+
+ if (MVE_SESSION_TYPE_DECODER == session->session_type)
+ {
+ /* stride alignment for rotation frame */
+ if (1 == rotation || 3 == rotation)
+ {
+ int slice_height = frame->decoded_width;
+ if (true == interlace)
+ {
+ /*
+ * The implementation of interlaced stream rotation here depends on
+ * EGIL-2953, i.e. on the FW being able to output YUV content
+ * according to the stride, plane_top and plane_bot settings below.
+ */
+ slice_height >>= 1;
+ }
+
+ if (MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR == frame->format || MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR == frame->format)
+ {
+ mve_frame.data.planar.plane_top[1] = mve_frame.data.planar.plane_top[0] +
+ frame->data.planar.stride_90[0] * slice_height;
+ mve_frame.data.planar.plane_top[2] = 0;
+ }
+ else if (MVE_BASE_OMX_BUFFER_FORMAT_YV12 == frame->format)
+ {
+ mve_frame.data.planar.plane_top[2] = mve_frame.data.planar.plane_top[0] +
+ frame->data.planar.stride_90[0] * slice_height;
+ mve_frame.data.planar.plane_top[1] = mve_frame.data.planar.plane_top[2] +
+ frame->data.planar.stride_90[2] * (slice_height >> 1);
+ }
+ else
+ {
+ mve_frame.data.planar.plane_top[1] = mve_frame.data.planar.plane_top[0] +
+ frame->data.planar.stride_90[0] * slice_height;
+ mve_frame.data.planar.plane_top[2] = mve_frame.data.planar.plane_top[1] +
+ frame->data.planar.stride_90[1] * (slice_height >> 1);
+ }
+
+ mve_frame.data.planar.stride[0] = frame->data.planar.stride_90[0];
+ mve_frame.data.planar.stride[1] = frame->data.planar.stride_90[1];
+ mve_frame.data.planar.stride[2] = frame->data.planar.stride_90[2];
+
+ mve_frame.visible_frame_width = frame->decoded_height;
+ mve_frame.visible_frame_height = frame->decoded_width;
+
+ mve_frame.data.planar.max_frame_width = frame->decoded_height;
+ mve_frame.data.planar.max_frame_height = frame->decoded_width;
+ }
+ }
+ if (true == interlace)
+ {
+ if (1 == rotation || 3 == rotation)
+ {
+ mve_frame.data.planar.plane_bot[0] = mve_frame.data.planar.plane_top[0] + (frame->data.planar.stride_90[0] >> 1);
+ mve_frame.data.planar.plane_bot[1] = mve_frame.data.planar.plane_top[1] + (frame->data.planar.stride_90[1] >> 1);
+ mve_frame.data.planar.plane_bot[2] = mve_frame.data.planar.plane_top[2] + (frame->data.planar.stride_90[2] >> 1);
+ }
+ else
+ {
+ mve_frame.data.planar.plane_bot[0] = frame->data.planar.plane_bot[0];
+ mve_frame.data.planar.plane_bot[1] = frame->data.planar.plane_bot[1];
+ mve_frame.data.planar.plane_bot[2] = frame->data.planar.plane_bot[2];
+ }
+ }
+ else
+ {
+ mve_frame.data.planar.plane_bot[0] = 0;
+ mve_frame.data.planar.plane_bot[1] = 0;
+ mve_frame.data.planar.plane_bot[2] = 0;
+ }
+ break;
+ }
+ }
+
+ ret = write_to_queue_v2(session, host, mve, channel, MVE_BUFFER_CODE_FRAME, sizeof(mve_frame), &mve_frame);
+
+ return ret;
+}
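+
+/*
+ * Rotation layout, as a worked example with hypothetical values: for a
+ * progressive NV12 decode rotated 90 degrees with decoded_width == 64 and
+ * stride_90[0] == 128, slice_height is 64, so the chroma plane is placed
+ * 128 * 64 bytes after plane_top[0], and the visible and max frame
+ * dimensions are swapped.
+ */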
+
+static mve_base_error write_buffer_bitstream(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ struct mve_com_buffer_bitstream *bitstream)
+{
+ mve_base_error ret;
+ struct mve_buffer_bitstream mve_bitstream;
+
+ /* Convert com- to fw buffer. */
+ mve_bitstream.host_handle = bitstream->nHandle;
+ mve_bitstream.user_data_tag = bitstream->timestamp;
+ mve_bitstream.bitstream_alloc_bytes = bitstream->nAllocLen;
+ mve_bitstream.bitstream_offset = bitstream->nOffset;
+ mve_bitstream.bitstream_filled_len = bitstream->nFilledLen;
+ mve_bitstream.bitstream_buf_addr = bitstream->pBufferData;
+
+ mve_bitstream.bitstream_flags = 0;
+ mve_bitstream.bitstream_flags |= (bitstream->nFlags & OMX_BUFFERFLAG_EOS) ? MVE_BUFFER_BITSTREAM_FLAG_EOS : 0;
+ mve_bitstream.bitstream_flags |= (bitstream->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) ? MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME : 0;
+ mve_bitstream.bitstream_flags |= (bitstream->nFlags & OMX_BUFFERFLAG_SYNCFRAME) ? MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME : 0;
+ mve_bitstream.bitstream_flags |= (bitstream->nFlags & OMX_BUFFERFLAG_CODECCONFIG) ? MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG : 0;
+ mve_bitstream.bitstream_flags |= (bitstream->nFlags & OMX_BUFFERFLAG_ENDOFSUBFRAME) ? MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME : 0;
+
+ ret = write_to_queue_v2(session, host, mve, channel, MVE_BUFFER_CODE_BITSTREAM, sizeof(mve_bitstream), &mve_bitstream);
+
+ return ret;
+}
+
+static mve_base_error write_buffer_roi(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ struct mve_com_buffer_roi *roi)
+{
+ mve_base_error ret;
+ struct mve_buffer_param mve_regions;
+ int i;
+
+ mve_regions.type = MVE_BUFFER_PARAM_TYPE_REGIONS;
+ mve_regions.data.regions.n_regions = roi->nRegions;
+
+ for (i = 0; i < roi->nRegions; ++i)
+ {
+ mve_regions.data.regions.region[i].mbx_left = roi->regions[i].mbx_left;
+ mve_regions.data.regions.region[i].mbx_right = roi->regions[i].mbx_right;
+ mve_regions.data.regions.region[i].mby_top = roi->regions[i].mby_top;
+ mve_regions.data.regions.region[i].mby_bottom = roi->regions[i].mby_bottom;
+ mve_regions.data.regions.region[i].qp_delta = roi->regions[i].qp_delta;
+ }
+
+ ret = write_to_queue_v2(session,
+ host,
+ mve,
+ channel,
+ MVE_BUFFER_CODE_PARAM,
+ sizeof(mve_regions),
+ &mve_regions);
+
+ return ret;
+}
+
+static mve_base_error read_buffer_frame(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ struct mve_com_buffer_frame *frame)
+{
+ mve_base_error ret;
+ struct mve_msg_header msg_header;
+ struct mve_buffer_frame mve_frame;
+
+ ret = read_from_queue_v2(session, host, mve, channel, &msg_header, &mve_frame, sizeof(mve_frame));
+ if (ret != MVE_BASE_ERROR_NONE)
+ {
+ return ret;
+ }
+
+ /* Convert fw- to com buffer. */
+ frame->nHandle = mve_frame.host_handle;
+ frame->nMVEFlags = mve_frame.frame_flags;
+ frame->timestamp = mve_frame.user_data_tag;
+ frame->pic_index = 0;
+ frame->decoded_height = mve_frame.visible_frame_height;
+ frame->decoded_width = mve_frame.visible_frame_width;
+
+ frame->nFlags = 0;
+ frame->nFlags |= (mve_frame.frame_flags & MVE_BUFFER_FRAME_FLAG_EOS) ? OMX_BUFFERFLAG_EOS : 0;
+ frame->nFlags |= (mve_frame.frame_flags & MVE_BUFFER_FRAME_FLAG_DECODE_ONLY) ? OMX_BUFFERFLAG_DECODEONLY : 0;
+ frame->nFlags |= (mve_frame.frame_flags & MVE_BUFFER_FRAME_FLAG_CORRUPT) ? OMX_BUFFERFLAG_DATACORRUPT : 0;
+ frame->nFlags |= (mve_frame.frame_flags & MVE_BUFFER_FRAME_FLAG_REF_FRAME) ? OMX_BUFFERFLAG_READONLY : 0;
+
+ if ((mve_frame.format & MVE_FORMAT_BF_A) != 0)
+ {
+ frame->data.afbc.plane_top = mve_frame.data.afbc.plane[0];
+ frame->data.afbc.plane_bot = mve_frame.data.afbc.plane[1];
+ frame->data.afbc.alloc_bytes_top = mve_frame.data.afbc.alloc_bytes[0];
+ frame->data.afbc.alloc_bytes_bot = mve_frame.data.afbc.alloc_bytes[1];
+ frame->data.afbc.cropx = mve_frame.data.afbc.cropx;
+ frame->data.afbc.cropy = mve_frame.data.afbc.cropy;
+ frame->data.afbc.y_offset = 0;
+ frame->data.afbc.rangemap = 0;
+ }
+ else
+ {
+ frame->data.planar.plane_top[0] = mve_frame.data.planar.plane_top[0];
+ frame->data.planar.plane_top[1] = mve_frame.data.planar.plane_top[1];
+ frame->data.planar.plane_top[2] = mve_frame.data.planar.plane_top[2];
+ frame->data.planar.plane_bot[0] = mve_frame.data.planar.plane_bot[0];
+ frame->data.planar.plane_bot[1] = mve_frame.data.planar.plane_bot[1];
+ frame->data.planar.plane_bot[2] = mve_frame.data.planar.plane_bot[2];
+ frame->data.planar.stride[0] = mve_frame.data.planar.stride[0];
+ frame->data.planar.stride[1] = mve_frame.data.planar.stride[1];
+ frame->data.planar.stride[2] = mve_frame.data.planar.stride[2];
+ frame->crc_top = 0;
+ frame->crc_bot = 0;
+ }
+
+ return ret;
+}
+
+static mve_base_error read_buffer_bitstream(struct mve_session *session,
+ struct mve_rsrc_dma_mem_t *host,
+ struct mve_rsrc_dma_mem_t *mve,
+ enum mve_log_fwif_channel channel,
+ struct mve_com_buffer_bitstream *bitstream)
+{
+ mve_base_error ret;
+ struct mve_msg_header msg_header;
+ struct mve_buffer_bitstream mve_bitstream;
+
+ ret = read_from_queue_v2(session, host, mve, channel, &msg_header, &mve_bitstream, sizeof(mve_bitstream));
+ if (ret != MVE_BASE_ERROR_NONE)
+ {
+ return ret;
+ }
+
+ /* Convert fw- to com buffer. */
+ bitstream->nHandle = mve_bitstream.host_handle;
+ bitstream->timestamp = mve_bitstream.user_data_tag;
+ bitstream->nAllocLen = mve_bitstream.bitstream_alloc_bytes;
+ bitstream->nOffset = mve_bitstream.bitstream_offset;
+ bitstream->nFilledLen = mve_bitstream.bitstream_filled_len;
+ bitstream->pBufferData = mve_bitstream.bitstream_buf_addr;
+
+ bitstream->nFlags = 0;
+ bitstream->nFlags |= (mve_bitstream.bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_EOS) ? OMX_BUFFERFLAG_EOS : 0;
+ bitstream->nFlags |= (mve_bitstream.bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME) ? OMX_BUFFERFLAG_ENDOFFRAME : 0;
+ bitstream->nFlags |= (mve_bitstream.bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME) ? OMX_BUFFERFLAG_SYNCFRAME : 0;
+ bitstream->nFlags |= (mve_bitstream.bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG) ? OMX_BUFFERFLAG_CODECCONFIG : 0;
+ bitstream->nFlags |= (mve_bitstream.bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME) ? OMX_BUFFERFLAG_ENDOFSUBFRAME : 0;
+
+ return ret;
+}
+
+static mve_base_error set_config(struct mve_session *session,
+ uint16_t size,
+ uint32_t *data)
+{
+ mve_base_error ret;
+ uint32_t *index = (uint32_t *)data;
+
+ /* Subtract the size for the prepended index. */
+ size -= sizeof(*index);
+
+ switch (*index)
+ {
+ case MVE_REQUEST_CODE_SET_OPTION:
+ {
+ struct mve_request_set_option *set_option = (struct mve_request_set_option *)(index + 1);
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, *index, size, set_option);
+ break;
+ }
+
+ case MVE_BUFFER_CODE_PARAM:
+ {
+ struct mve_buffer_param *buffer_param = (struct mve_buffer_param *)(index + 1);
+ ret = write_to_queue_v2(session, session->buf_input_in, session->buf_input_out, MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, MVE_BUFFER_CODE_PARAM, size, buffer_param);
+ break;
+ }
+
+ default:
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Received com set parameter message with with illegal code. code=%u.", *index);
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ return ret;
+}
+
+static mve_base_error add_message(struct mve_session *session,
+ uint16_t code,
+ uint16_t size,
+ uint32_t *data)
+{
+ mve_base_error ret;
+
+ switch (code)
+ {
+ case MVE_MESSAGE_CODE_GO:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_GO, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_STOP:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_STOP, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_INPUT_FLUSH:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_INPUT_FLUSH, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_OUTPUT_FLUSH:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_OUTPUT_FLUSH, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_SET_PARAMETER:
+ {
+ ret = set_config(session, size, data);
+ break;
+ }
+ case MVE_MESSAGE_CODE_GET_PARAMETER:
+ {
+ ret = MVE_BASE_ERROR_NOT_IMPLEMENTED;
+ break;
+ }
+ case MVE_MESSAGE_CODE_SWITCH:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_SWITCH, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_PING:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_PING, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_SET_CONFIG:
+ {
+ ret = set_config(session, size, data);
+ break;
+ }
+ case MVE_MESSAGE_CODE_GET_CONFIG:
+ {
+ ret = MVE_BASE_ERROR_NOT_IMPLEMENTED;
+ break;
+ }
+ case MVE_MESSAGE_CODE_DUMP:
+ {
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_DUMP, 0, NULL);
+ break;
+ }
+ case MVE_MESSAGE_CODE_JOB:
+ {
+ struct mve_job_command *job_v1 = (struct mve_job_command *)data;
+ struct mve_request_job job_v2;
+
+ job_v2.cores = job_v1->cores;
+ job_v2.frames = job_v1->frames;
+ job_v2.flags = job_v1->flags;
+
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_JOB, sizeof(job_v2), &job_v2);
+ break;
+ }
+ case MVE_MESSAGE_CODE_RELEASE_REF_FRAME:
+ {
+ struct mve_com_notify_release_ref_frame *source = (struct mve_com_notify_release_ref_frame *)data;
+ struct mve_request_release_ref_frame dest;
+
+ dest.buffer_address = source->mve_buffer_addr;
+ ret = write_to_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, MVE_REQUEST_CODE_RELEASE_REF_FRAME, sizeof(dest), &dest);
+ break;
+ }
+ default:
+ {
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+#define COPY_IDENTICAL_MESSAGE(_code, _header, _out, _in, _size) \
+ { \
+ /* Verify that size matches expected size. */ \
+ if ((_header)->size != _size) \
+ { \
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_WARNING, session, "Com message size did not match expected size. code=%u, size=%u, expected=%u.", \
+ (_header)->code, (_header)->size, (uint32_t)(_size)); \
+ break; \
+ } \
+ \
+ /* Allocate message structure. */ \
+ _out = MVE_RSRC_MEM_CACHE_ALLOC(_size, GFP_KERNEL); \
+ if (_out == NULL) \
+ { \
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_WARNING, session, "Failed to allocate memory for com message."); \
+ break; \
+ } \
+ \
+ /* v1 and v2 structures are identical. */ \
+ (_header)->code = _code; \
+ memcpy((_out), & (_in), _size); \
+ }
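+
+/*
+ * For example, COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_SWITCHED_IN_V1,
+ * header, response_v1.switched_in, response_v2.switched_in,
+ * sizeof(response_v2.switched_in)) verifies the received size, allocates a
+ * copy from the memory cache, rewrites header->code to the v1 value and
+ * memcpy()s the payload; on failure it leaves response_v1 at NULL and falls
+ * out of the enclosing switch via break.
+ */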
+
+static uint32_t *get_message(struct mve_session *session,
+ struct mve_msg_header *header)
+{
+ struct mve_com_v2 *com = (struct mve_com_v2 *)session->com;
+ mve_base_error ret;
+ union
+ {
+ uint32_t *ret;
+ struct mve_switched_in_v1 *switched_in;
+ struct mve_switched_out_v1 *switched_out;
+ struct mve_set_reply_v1 *set_reply;
+ struct mve_error_v1 *error;
+ struct mve_state_change_v1 *state_change;
+ struct mve_event_v1 *event;
+ }
+ response_v1 = { .ret = NULL };
+ union
+ {
+ struct mve_response_switched_in switched_in;
+ struct mve_response_switched_out switched_out;
+ struct mve_response_state_change state_change;
+ struct mve_response_job_dequeued job_dequeue;
+ struct mve_response_error error;
+ struct mve_response_frame_alloc_parameters alloc_parameters;
+ struct mve_response_sequence_parameters sequence_parameters;
+ struct mve_response_event event;
+ struct mve_buffer_param buffer_param;
+ struct mve_event_processed event_processed;
+ struct mve_response_set_option_fail set_option_fail;
+ struct mve_response_ref_frame_unused ref_frame_unused;
+ }
+ response_v2;
+
+ ret = read_from_queue_v2(session, session->msg_in_queue, session->msg_out_queue, MVE_LOG_FWIF_CHANNEL_MESSAGE, header, &response_v2, sizeof(response_v2));
+ if (ret != MVE_BASE_ERROR_NONE)
+ {
+ return NULL;
+ }
+
+ switch (header->code)
+ {
+ case MVE_RESPONSE_CODE_SWITCHED_IN:
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_SWITCHED_IN_V1, header, response_v1.switched_in, response_v2.switched_in, sizeof(response_v2.switched_in));
+ break;
+ }
+ case MVE_RESPONSE_CODE_SWITCHED_OUT:
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_SWITCHED_OUT_V1, header, response_v1.switched_out, response_v2.switched_out, sizeof(response_v2.switched_out));
+ break;
+ }
+ case MVE_RESPONSE_CODE_SET_OPTION_CONFIRM:
+ {
+ header->code = MVE_RESPONSE_CODE_SET_CONFIG_REPLY_V1;
+ header->size = sizeof(*response_v1.set_reply);
+
+ response_v1.set_reply = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(*response_v1.set_reply), GFP_KERNEL);
+ if (response_v1.set_reply == NULL)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ break;
+ }
+
+ response_v1.set_reply->index = -1;
+ response_v1.set_reply->return_code = OMX_ErrorNone;
+
+ break;
+ }
+ case MVE_RESPONSE_CODE_SET_OPTION_FAIL:
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "MVE_RESPONSE_CODE_SET_OPTION_FAIL(%d): %s", response_v2.set_option_fail.index, response_v2.set_option_fail.message);
+
+ /*
+ * This is a pending response, so it must still be processed as a
+ * normal reply, but with an error return code.
+ */
+ header->code = MVE_RESPONSE_CODE_SET_CONFIG_REPLY_V1;
+ header->size = sizeof(*response_v1.set_reply);
+
+ response_v1.set_reply = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(*response_v1.set_reply), GFP_KERNEL);
+ if (response_v1.set_reply == NULL)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ break;
+ }
+ response_v1.set_reply->index = -1;
+ response_v1.set_reply->return_code = MVE_BASE_ERROR_FIRMWARE;
+ break;
+ }
+ case MVE_RESPONSE_CODE_JOB_DEQUEUED:
+ {
+ header->code = MVE_RESPONSE_CODE_JOB_DEQUEUED_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_INPUT:
+ {
+ header->code = MVE_RESPONSE_CODE_INPUT_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_OUTPUT:
+ {
+ header->code = MVE_RESPONSE_CODE_OUTPUT_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_INPUT_FLUSHED:
+ {
+ header->code = MVE_RESPONSE_CODE_INPUT_FLUSHED_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_OUTPUT_FLUSHED:
+ {
+ header->code = MVE_RESPONSE_CODE_OUTPUT_FLUSHED_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_PONG:
+ {
+ header->code = MVE_RESPONSE_CODE_PONG_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_ERROR:
+ {
+ header->code = MVE_RESPONSE_CODE_ERROR_V1;
+ header->size = sizeof(*response_v1.error);
+
+ response_v1.error = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(*response_v1.error), GFP_KERNEL);
+ if (response_v1.error == NULL)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ break;
+ }
+
+ response_v1.error->reason = response_v2.error.error_code;
+ memcpy(&response_v1.error->message, response_v2.error.message, sizeof(response_v1.error->message));
+
+ break;
+ }
+ case MVE_RESPONSE_CODE_STATE_CHANGE:
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_STATE_CHANGE_V1, header, response_v1.state_change, response_v2.state_change, sizeof(response_v2.state_change));
+ break;
+ }
+ case MVE_RESPONSE_CODE_DUMP:
+ {
+ header->code = MVE_RESPONSE_CODE_DUMP_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_IDLE:
+ {
+ header->code = MVE_RESPONSE_CODE_IDLE_V1;
+ header->size = 0;
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ break;
+ }
+ case MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM:
+ {
+ struct mve_base_fw_frame_alloc_parameters *dst;
+ struct mve_response_frame_alloc_parameters *src;
+
+ header->code = MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM;
+ header->size = sizeof(struct mve_base_fw_frame_alloc_parameters);
+
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_base_fw_frame_alloc_parameters), GFP_KERNEL);
+ if (NULL == response_v1.ret)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ break;
+ }
+
+ src = &response_v2.alloc_parameters;
+ dst = (struct mve_base_fw_frame_alloc_parameters *)response_v1.ret;
+
+ dst->planar_alloc_frame_width = src->planar_alloc_frame_width;
+ dst->planar_alloc_frame_height = src->planar_alloc_frame_height;
+ dst->afbc_alloc_bytes = src->afbc_alloc_bytes;
+ dst->afbc_alloc_bytes_downscaled = src->afbc_alloc_bytes_downscaled;
+ dst->afbc_width_in_superblocks = src->afbc_width_in_superblocks;
+ dst->afbc_width_in_superblocks_downscaled = src->afbc_width_in_superblocks_downscaled;
+ dst->cropx = src->cropx;
+ dst->cropy = src->cropy;
+ dst->mbinfo_alloc_bytes = src->mbinfo_alloc_bytes;
+ /* The alloc parameters are also stored on the com object because they
+ * are needed later when calculating filled_len. */
+ com->alloc_params = response_v2.alloc_parameters;
+ break;
+ }
+ case MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS:
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS, header, response_v1.ret, response_v2.sequence_parameters, sizeof(response_v2.sequence_parameters));
+ break;
+ }
+ case MVE_BUFFER_CODE_PARAM:
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_BUFFER_CODE_PARAM, header, response_v1.ret, response_v2.buffer_param,
+ header->size);
+ break;
+ }
+ case MVE_RESPONSE_CODE_EVENT:
+ {
+ header->code = MVE_RESPONSE_CODE_EVENT_V1;
+
+ if (MVE_EVENT_PROCESSED_v2 != response_v2.event.event_code &&
+ MVE_EVENT_TRACE_BUFFERS != response_v2.event.event_code)
+ {
+ header->size = sizeof(*response_v1.event);
+ response_v1.event = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(*response_v1.event), GFP_KERNEL);
+ if (NULL == response_v1.event)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ break;
+ }
+ }
+
+ switch (response_v2.event.event_code)
+ {
+ case MVE_EVENT_ERROR_STREAM_CORRUPT:
+ {
+ response_v1.event->data1 = OMX_EventError;
+ response_v1.event->data2 = OMX_ErrorStreamCorrupt;
+ break;
+ }
+ case MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED:
+ {
+ response_v1.event->data1 = OMX_EventError;
+ response_v1.event->data2 = OMX_ErrorNotImplemented;
+ break;
+ }
+ case MVE_EVENT_PROCESSED_v2:
+ {
+ header->size -= sizeof(response_v2.event.event_code);
+
+ if (header->size == sizeof(response_v2.event.event_data.event_processed))
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_PROCESSED_V1, header, response_v1.event, response_v2.event.event_data.event_processed,
+ sizeof(response_v2.event.event_data.event_processed));
+ }
+ else if (0 == header->size)
+ {
+ response_v1.ret = MVE_RSRC_MEM_CACHE_ALLOC(0, GFP_KERNEL);
+ }
+ else
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Com event processed message should have size 0 or %u. size=%u.",
+ sizeof(response_v2.event.event_data.event_processed), header->size);
+ }
+ break;
+ }
+ case MVE_EVENT_REF_FRAME:
+ {
+ break;
+ }
+ case MVE_EVENT_TRACE_BUFFERS:
+ {
+ header->size -= sizeof(response_v2.event.event_code);
+
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_TRACE_BUFFERS,
+ header,
+ response_v1.event,
+ response_v2.event.event_data.event_trace_buffers,
+ sizeof(response_v2.event.event_data.event_trace_buffers));
+ break;
+ }
+ default:
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Received com message with illegal event code. code=%u.", response_v2.event.event_code);
+ break;
+ }
+ }
+ break;
+ }
+ case MVE_RESPONSE_CODE_REF_FRAME_UNUSED:
+ {
+ COPY_IDENTICAL_MESSAGE(MVE_RESPONSE_CODE_REF_FRAME_UNUSED, header, response_v1.ret, response_v2.ref_frame_unused, sizeof(response_v2.ref_frame_unused));
+ break;
+ }
+ default:
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Received com message with illegal code. code=%u.", header->code);
+ break;
+ }
+ }
+
+ return response_v1.ret;
+}
+
+static mve_base_error add_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ switch (type)
+ {
+ case MVE_COM_BUFFER_TYPE_FRAME:
+ ret = write_buffer_frame(session, session->buf_input_in, session->buf_input_out, MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER, &buffer->frame);
+ break;
+ case MVE_COM_BUFFER_TYPE_BITSTREAM:
+ ret = write_buffer_bitstream(session, session->buf_input_in, session->buf_input_out, MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER, &buffer->bitstream);
+ break;
+ case MVE_COM_BUFFER_TYPE_ROI:
+ ret = write_buffer_roi(session, session->buf_input_in, session->buf_input_out, MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER, &buffer->roi);
+ break;
+ default:
+ WARN_ON(true);
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ break;
+ }
+
+ return ret;
+}
+
+static mve_base_error add_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer,
+ enum mve_com_buffer_type type)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ switch (type)
+ {
+ case MVE_COM_BUFFER_TYPE_FRAME:
+ ret = write_buffer_frame(session, session->buf_output_in, session->buf_output_out, MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, &buffer->frame);
+ break;
+ case MVE_COM_BUFFER_TYPE_BITSTREAM:
+ ret = write_buffer_bitstream(session, session->buf_output_in, session->buf_output_out, MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, &buffer->bitstream);
+ break;
+ case MVE_COM_BUFFER_TYPE_ROI: /* Intentional fall-through */
+ default:
+ WARN_ON(true); /* Should never end up here */
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ break;
+ }
+
+ return ret;
+}
+
+static mve_base_error get_input_buffer(struct mve_session *session,
+ mve_com_buffer *buffer)
+{
+ mve_base_error ret;
+
+ if (is_frame(session, true) == true)
+ {
+ ret = read_buffer_frame(session, session->buf_input_in, session->buf_input_out, MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER, &buffer->frame);
+ }
+ else
+ {
+ ret = read_buffer_bitstream(session, session->buf_input_in, session->buf_input_out, MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER, &buffer->bitstream);
+ }
+
+ return ret;
+}
+
+static mve_base_error get_output_buffer(struct mve_session *session,
+ mve_com_buffer *buffer)
+{
+ mve_base_error ret;
+
+ if (is_frame(session, false) == true)
+ {
+ ret = read_buffer_frame(session, session->buf_output_in, session->buf_output_out, MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, &buffer->frame);
+ }
+ else
+ {
+ ret = read_buffer_bitstream(session, session->buf_output_in, session->buf_output_out, MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, &buffer->bitstream);
+ }
+
+ return ret;
+}
+
+static mve_base_error get_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc)
+{
+ struct mve_rpc_communication_area *rpc_area;
+ mve_base_error err = MVE_BASE_ERROR_NOT_READY;
+
+ rpc_area = mve_rsrc_dma_mem_map(session->rpc_area);
+ if (NULL == rpc_area)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ mve_rsrc_dma_mem_invalidate_cache(session->rpc_area);
+
+ if (MVE_RPC_STATE_PARAM == rpc_area->state)
+ {
+ /* Copy RPC details to the client supplied structure */
+ rpc->state = rpc_area->state;
+ rpc->call_id = rpc_area->call_id;
+ rpc->size = rpc_area->size;
+
+ switch (rpc_area->call_id)
+ {
+ case MVE_RPC_FUNCTION_DEBUG_PRINTF:
+ memcpy(rpc->params.debug_print.string,
+ rpc_area->params.debug_print.string,
+ MVE_RPC_DATA_SIZE_IN_WORDS * 4);
+ break;
+ case MVE_RPC_FUNCTION_MEM_ALLOC:
+ rpc->params.mem_alloc.size = rpc_area->params.mem_alloc.size;
+ rpc->params.mem_alloc.max_size = rpc_area->params.mem_alloc.max_size;
+ rpc->params.mem_alloc.region = rpc_area->params.mem_alloc.region;
+ rpc->params.mem_alloc.log2_alignment = rpc_area->params.mem_alloc.log2_alignment;
+ break;
+ case MVE_RPC_FUNCTION_MEM_RESIZE:
+ rpc->params.mem_resize.ve_pointer = rpc_area->params.mem_resize.ve_pointer;
+ rpc->params.mem_resize.new_size = rpc_area->params.mem_resize.new_size;
+ break;
+ case MVE_RPC_FUNCTION_MEM_FREE:
+ rpc->params.mem_free.ve_pointer = rpc_area->params.mem_free.ve_pointer;
+ break;
+ }
+
+ err = MVE_BASE_ERROR_NONE;
+ }
+
+ mve_rsrc_dma_mem_unmap(session->rpc_area);
+
+ return err;
+}
+
+static mve_base_error put_rpc_message(struct mve_session *session,
+ mve_com_rpc *rpc)
+{
+ struct mve_rpc_communication_area *rpc_area;
+
+ rpc_area = mve_rsrc_dma_mem_map(session->rpc_area);
+ if (NULL == rpc_area)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Copy RPC details from the client supplied structure to the shared RPC area */
+ switch (rpc->call_id)
+ {
+ case MVE_COM_RPC_FUNCTION_DEBUG_PRINTF:
+ break;
+ case MVE_COM_RPC_FUNCTION_MEM_ALLOC:
+ rpc_area->params.data[0] = rpc->params.data[0];
+ break;
+ case MVE_COM_RPC_FUNCTION_MEM_RESIZE:
+ rpc_area->params.data[0] = rpc->params.data[0];
+ break;
+ case MVE_COM_RPC_FUNCTION_MEM_FREE:
+ break;
+ }
+
+ rpc_area->call_id = rpc->call_id;
+ rpc_area->size = rpc->size;
+ wmb();
+ rpc_area->state = rpc->state;
+
+ wmb();
+ mve_rsrc_dma_mem_clean_cache(session->rpc_area);
+ mve_rsrc_dma_mem_unmap(session->rpc_area);
+
+ return MVE_BASE_ERROR_NONE;
+}
+
+static void mve_com_host_interface_v2_construct(struct mve_com_v2 *com)
+{
+ memset(com, 0, sizeof(*com));
+
+ com->base.host_interface.add_message = add_message;
+ com->base.host_interface.get_message = get_message;
+ com->base.host_interface.add_input_buffer = add_input_buffer;
+ com->base.host_interface.add_output_buffer = add_output_buffer;
+ com->base.host_interface.get_input_buffer = get_input_buffer;
+ com->base.host_interface.get_output_buffer = get_output_buffer;
+ com->base.host_interface.get_rpc_message = get_rpc_message;
+ com->base.host_interface.put_rpc_message = put_rpc_message;
+}
+
+struct mve_com *mve_com_host_interface_v2_new(void)
+{
+ struct mve_com_v2 *com;
+
+ /* Allocate com object. */
+ com = MVE_RSRC_MEM_ZALLOC(sizeof(*com), GFP_KERNEL);
+ if (com == NULL)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_WARNING, "Failed to allocate com object.");
+ return NULL;
+ }
+
+ /* Run constructor. */
+ mve_com_host_interface_v2_construct(com);
+
+ return &com->base;
+}
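+
+/* Note (illustrative): the constructor above fills in a vtable and the
+ * factory returns a pointer to the embedded base object. Code that needs
+ * the derived struct back would use the usual kernel container_of idiom:
+ *
+ *     struct mve_com *base = mve_com_host_interface_v2_new();
+ *     struct mve_com_v2 *v2 = container_of(base, struct mve_com_v2, base);
+ */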
diff --git a/drivers/video/arm/v5xx/base/mve_com_host_interface_v2.h b/drivers/video/arm/v5xx/base/mve_com_host_interface_v2.h
new file mode 100644
index 000000000000..67d6255f90f2
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_com_host_interface_v2.h
@@ -0,0 +1,25 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_COM_HOST_INTERFACE_V2_H
+#define MVE_COM_HOST_INTERFACE_V2_H
+
+#include "mve_com.h"
+#include "mve_mem_region.h"
+
+struct session;
+
+struct mve_com *mve_com_host_interface_v2_new(void);
+
+#endif /* MVE_COM_HOST_INTERFACE_V2_H */
diff --git a/drivers/video/arm/v5xx/base/mve_command.c b/drivers/video/arm/v5xx/base/mve_command.c
new file mode 100644
index 000000000000..b1e99eb67529
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_command.c
@@ -0,0 +1,564 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/slab.h>
+#endif
+
+#include "mve_driver.h"
+#include "mve_command.h"
+#include "mve_session.h"
+
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_register.h"
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_pm.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_dvfs.h"
+
+#ifdef UNIT
+#include "mve_rsrc_register.h"
+#endif
+
+#define CORESCHED_FUSE_DISABLE_AFBC (0)
+#define CORESCHED_FUSE_DISABLE_REAL (1)
+#define CORESCHED_FUSE_DISABLE_VP8 (2)
+
+#define OMX_ROLE_VIDEO_DECODER_RV "video_decoder.rv"
+#define OMX_ROLE_VIDEO_DECODER_VP8 "video_decoder.vp8"
+
+struct mve_buffer_client;
+
+/**
+ * Check the given OMX role against the fuse state.
+ * @param role A OMX role string.
+ * @return true if the role is enabled, false if it is fused.
+ */
+static bool is_role_enabled(char *role)
+{
+ bool ret = true;
+ uint32_t fuse = mver_reg_get_fuse();
+
+ if (NULL == role)
+ {
+ return false;
+ }
+
+ if (0 == strcmp(role, OMX_ROLE_VIDEO_DECODER_RV))
+ {
+ if (fuse & (1 << CORESCHED_FUSE_DISABLE_REAL))
+ {
+ ret = false;
+ }
+ }
+ else if (0 == strcmp(role, OMX_ROLE_VIDEO_DECODER_VP8))
+ {
+ if (fuse & (1 << CORESCHED_FUSE_DISABLE_VP8))
+ {
+ ret = false;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * Check the given OMX role against the fuse state before creating a new session.
+ * @param data Pointer to the data associated with the command (the OMX role string)
+ * @param filep File descriptor used to identify which client this session belongs to
+ * @return MVE error code
+ */
+static mve_base_error mve_command_create_session_helper(void *data,
+ struct file *filep)
+{
+ struct mve_session *session = NULL;
+ char *role = data;
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ if (false == is_role_enabled(role))
+ {
+ ret = MVE_BASE_ERROR_NOT_IMPLEMENTED;
+ MVE_LOG_PRINT(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ "Role is not implemented/enabled. role=%s.",
+ role);
+ }
+ else
+ {
+ session = mve_session_create(filep);
+ if (NULL == session)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+ else
+ {
+ mve_session_set_role(session, role);
+ }
+ }
+
+ return ret;
+}
+
+static const char *mve_command_to_string(uint32_t cmd)
+{
+ static const char *name[] = {
+ "MVE_BASE_CREATE_SESSION",
+ "MVE_BASE_DESTROY_SESSION",
+ "MVE_BASE_ACTIVATE_SESSION",
+ "MVE_BASE_ENQUEUE_FLUSH_BUFFERS",
+ "MVE_BASE_ENQUEUE_STATE_CHANGE",
+ "MVE_BASE_GET_EVENT",
+ "MVE_BASE_SET_PARAMETER",
+ "MVE_BASE_GET_PARAMETER",
+ "MVE_BASE_SET_CONFIG",
+ "MVE_BASE_GET_CONFIG",
+ "MVE_BASE_REGISTER_BUFFER",
+ "MVE_BASE_UNREGISTER_BUFFER",
+ "MVE_BASE_FILL_THIS_BUFFER",
+ "MVE_BASE_EMPTY_THIS_BUFFER",
+ "MVE_BASE_NOTIFY_REF_FRAME_RELEASE",
+ "MVE_BASE_REQUEST_MAX_FREQUENCY",
+ "MVE_BASE_READ_HW_INFO",
+ "MVE_BASE_RPC_MEM_ALLOC",
+ "MVE_BASE_RPC_MEM_RESIZE",
+ "MVE_BASE_DEBUG_READ_REGISTER",
+ "MVE_BASE_DEBUG_WRITE_REGISTER",
+ "MVE_BASE_DEBUG_INTERRUPT_COUNT",
+ "MVE_BASE_DEBUG_SEND_COMMAND",
+ "MVE_BASE_DEBUG_FIRMWARE_HUNG_SIMULATION",
+ };
+
+ if (MVE_BASE_DEBUG_FIRMWARE_HUNG_SIMULATION < cmd)
+ {
+ return "Unknown command";
+ }
+
+ return name[cmd];
+}
+
+struct mve_response *mve_command_execute(struct mve_base_command_header *header,
+ void *data,
+ struct file *filep)
+{
+#define MVE_LOG_COMMAND(_severity) \
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, \
+ _severity, \
+ session, \
+                          "mve_command_execute(header={cmd=%u \"%s\", size=%u, data[0]=%u}, data=%p, file=%p).", \
+ header->cmd, \
+ mve_command_to_string(header->cmd), \
+ header->size, \
+ header->data[0], \
+ data, \
+ filep);
+
+ struct mve_response *ret;
+ struct mve_session *session;
+
+ /* Allocate and initialize return struct */
+ ret = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_response), GFP_KERNEL);
+ if (NULL == ret)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Failed to allocate memory for the ioctl return struct.");
+ return NULL;
+ }
+
+ ret->error = MVE_BASE_ERROR_NONE;
+ ret->firmware_error = 0;
+ ret->size = 0;
+ ret->data = NULL;
+
+ /* Find session associated with file pointer */
+ session = mve_session_get_by_file(filep);
+
+ switch (header->cmd)
+ {
+ case MVE_BASE_CREATE_SESSION:
+ if (NULL != session)
+ {
+ /* Multiple sessions for one file descriptor is not allowed! */
+ ret->error = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to create session. Only one session per file descriptor is allowed.");
+ }
+ else
+ {
+ ret->error = mve_command_create_session_helper(data, filep);
+ MVE_LOG_COMMAND(MVE_LOG_INFO);
+ }
+ break;
+ case MVE_BASE_DESTROY_SESSION:
+ MVE_LOG_COMMAND(MVE_LOG_INFO);
+ mve_session_destroy(session);
+ break;
+ case MVE_BASE_ACTIVATE_SESSION:
+ {
+ bool res;
+ uint32_t *version;
+ struct mve_base_fw_secure_descriptor *fw_secure_desc;
+
+ MVE_LOG_COMMAND(MVE_LOG_INFO);
+ fw_secure_desc = (struct mve_base_fw_secure_descriptor *)data;
+ version = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(uint32_t), GFP_KERNEL);
+ if (NULL != version)
+ {
+ res = mve_session_activate(session, version, fw_secure_desc);
+ if (false != res)
+ {
+ ret->data = version;
+ ret->size = sizeof(uint32_t);
+ }
+ else
+ {
+ ret->error = MVE_BASE_ERROR_UNDEFINED;
+ MVE_RSRC_MEM_CACHE_FREE(version, sizeof(uint32_t));
+ }
+ }
+ else
+ {
+ ret->error = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to allocate memory for the ioctl return struct's data member.");
+ }
+ break;
+ }
+ case MVE_BASE_ENQUEUE_FLUSH_BUFFERS:
+ {
+ uint32_t flush = ((uint32_t *)data)[0];
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->error = mve_session_enqueue_flush_buffers(session, flush);
+ break;
+ }
+ case MVE_BASE_ENQUEUE_STATE_CHANGE:
+ {
+ enum mve_base_hw_state state = ((uint32_t *)data)[0];
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->error = mve_session_enqueue_state_change(session, state);
+ break;
+ }
+ case MVE_BASE_GET_EVENT:
+ {
+ struct mve_base_event_header *event;
+ uint32_t timeout;
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ timeout = ((uint32_t *)data)[0];
+ event = mve_session_get_event(session, timeout);
+ if (NULL == event)
+ {
+ ret->error = MVE_BASE_ERROR_TIMEOUT;
+ }
+ else
+ {
+ ret->size = sizeof(struct mve_base_event_header) + event->size;
+ ret->data = event;
+ }
+ break;
+ }
+ case MVE_BASE_SET_PARAMETER:
+ {
+ ret->error = mve_session_set_paramconfig(session,
+ header->size,
+ data,
+ &ret->firmware_error,
+ PARAMETER);
+ if (ret->error == MVE_BASE_ERROR_NONE)
+ {
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ }
+ break;
+ }
+ case MVE_BASE_GET_PARAMETER:
+ {
+ ret->error = mve_session_get_paramconfig(session,
+ header,
+ data,
+ ret,
+ PARAMETER);
+ if (ret->error == MVE_BASE_ERROR_NONE)
+ {
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ }
+ break;
+ }
+ case MVE_BASE_SET_CONFIG:
+ {
+ ret->error = mve_session_set_paramconfig(session,
+ header->size,
+ data,
+ &ret->firmware_error,
+ CONFIG);
+ if (ret->error == MVE_BASE_ERROR_NONE)
+ {
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ }
+ break;
+ }
+ case MVE_BASE_GET_CONFIG:
+ {
+ ret->error = mve_session_get_paramconfig(session,
+ header,
+ data,
+ ret,
+ CONFIG);
+ if (ret->error == MVE_BASE_ERROR_NONE)
+ {
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ }
+ break;
+ }
+ case MVE_BASE_REGISTER_BUFFER:
+ {
+ uint32_t port_index = *((uint32_t *)data);
+ struct mve_base_buffer_userspace *descriptor =
+ (struct mve_base_buffer_userspace *)(data + sizeof(uint32_t));
+
+            /* Verify that the user and kernel space versions of mve_buffer_userspace
+             * at least match in size */
+ WARN_ON(header->size != sizeof(uint32_t) + sizeof(struct mve_base_buffer_userspace));
+
+ MVE_LOG_COMMAND(MVE_LOG_INFO);
+ if (1 < port_index)
+ {
+ ret->error = MVE_BASE_ERROR_BAD_PORT_INDEX;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+                                  "%u is greater than 1, which is not a valid port_index.",
+ port_index);
+ }
+ else
+ {
+ ret->error = mve_session_buffer_register(session, port_index, descriptor);
+ }
+ break;
+ }
+ case MVE_BASE_UNREGISTER_BUFFER:
+ {
+ mve_base_buffer_handle_t buffer_id = *((mve_base_buffer_handle_t *)data);
+ /* Must check for NULL here to avoid returning an error code when a buffer
+ * is unregistered after the session has been removed */
+ if (NULL != session)
+ {
+ MVE_LOG_COMMAND(MVE_LOG_INFO);
+ ret->error = mve_session_buffer_unregister(session, buffer_id);
+ }
+ break;
+ }
+ case MVE_BASE_FILL_THIS_BUFFER:
+ {
+ struct mve_base_buffer_details *param = (struct mve_base_buffer_details *)data;
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->error = mve_session_buffer_enqueue(session, param, false);
+ break;
+ }
+ case MVE_BASE_EMPTY_THIS_BUFFER:
+ {
+ struct mve_base_buffer_details *param = (struct mve_base_buffer_details *)data;
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->error = mve_session_buffer_enqueue(session, param, true);
+ break;
+ }
+ case MVE_BASE_NOTIFY_REF_FRAME_RELEASE:
+ {
+ mve_base_buffer_handle_t buffer_id = *((mve_base_buffer_handle_t *)data);
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->error = mve_session_buffer_notify_ref_frame_release(session, buffer_id);
+ break;
+ }
+ case MVE_BASE_REQUEST_MAX_FREQUENCY:
+ {
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+
+ mver_dvfs_request_max_frequency();
+ ret->error = MVE_BASE_ERROR_NONE;
+ break;
+ }
+ case MVE_BASE_READ_HW_INFO:
+ {
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->data = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_base_hw_info), GFP_KERNEL);
+ if (NULL == ret->data)
+ {
+ ret->error = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to allocate memory for the ioctl return struct's data member.");
+ }
+ else
+ {
+ struct mve_base_hw_info *hw_info;
+ hw_info = (struct mve_base_hw_info *)ret->data;
+ ret->size = sizeof(struct mve_base_hw_info);
+ hw_info->fuse = mver_reg_get_fuse();
+ hw_info->version = mver_reg_get_version();
+ hw_info->ncores = mver_scheduler_get_ncores();
+ }
+ break;
+ }
+ case MVE_BASE_RPC_MEM_ALLOC:
+ {
+#ifndef EMULATOR
+ int32_t fd;
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+
+ fd = *((int32_t *)data);
+ ret->error = mve_session_handle_rpc_mem_alloc(session, fd);
+#endif /* EMULATOR */
+ break;
+ }
+ case MVE_BASE_RPC_MEM_RESIZE:
+ {
+#ifndef EMULATOR
+ int32_t fd;
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+
+ fd = *((int32_t *)data);
+ ret->error = mve_session_handle_rpc_mem_resize(session, fd);
+#endif /* EMULATOR */
+ break;
+ }
+#ifdef UNIT
+ case MVE_BASE_DEBUG_READ_REGISTER:
+ WARN_ON(header->size != sizeof(uint32_t));
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->data = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(uint32_t), GFP_KERNEL);
+ if (NULL == ret->data)
+ {
+ ret->error = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to allocate memory for the ioctl return struct's data member.");
+ }
+ else
+ {
+ tCS *regs;
+ uint32_t *base;
+ uint32_t offset;
+ uint32_t value;
+
+ regs = mver_reg_get_coresched_bank();
+ base = (uint32_t *)regs;
+ offset = ((uint32_t *)data)[0] / sizeof(uint32_t);
+ value = mver_reg_read32(base + offset);
+ mver_reg_put_coresched_bank(&regs);
+
+ ret->size = sizeof(uint32_t);
+ *((uint32_t *)ret->data) = value;
+ }
+ break;
+ case MVE_BASE_DEBUG_WRITE_REGISTER:
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+
+ WARN_ON(header->size != sizeof(uint32_t) * 2);
+ {
+ tCS *regs;
+ uint32_t *base;
+ uint32_t offset;
+ uint32_t value;
+
+ regs = mver_reg_get_coresched_bank();
+ base = (uint32_t *)regs;
+ offset = ((uint32_t *)data)[0] / sizeof(uint32_t);
+ value = ((uint32_t *)data)[1];
+ mver_reg_write32(base + offset, value);
+ mver_reg_put_coresched_bank(&regs);
+ }
+ break;
+ case MVE_BASE_DEBUG_INTERRUPT_COUNT:
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+
+ ret->data = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(uint32_t), GFP_KERNEL);
+ if (NULL == ret->data)
+ {
+ ret->error = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to allocate memory for the ioctl return struct's data member.");
+ }
+ else
+ {
+ *((uint32_t *)ret->data) = mve_rsrc_data.interrupts;
+ ret->size = sizeof(uint32_t);
+ }
+ break;
+ case MVE_BASE_DEBUG_SEND_COMMAND:
+ {
+ uint32_t *ptr = (uint32_t *)data;
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ ret->error = mve_session_enqueue_message(session, ptr[0], ptr[1], &ptr[2]);
+ break;
+ }
+ case MVE_BASE_DEBUG_FIRMWARE_HUNG_SIMULATION:
+ {
+            /* Unload the firmware to simulate a firmware hang where the
+             * firmware no longer responds to watchdog pings.
+             */
+#ifndef DISABLE_WATCHDOG
+ uint32_t on = *(uint32_t *)data;
+ mve_session_firmware_hung_simulation(session, on);
+#endif
+
+ MVE_LOG_COMMAND(MVE_LOG_DEBUG);
+ break;
+ }
+#endif
+ default:
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Invalid command. command=%u.",
+ header->cmd);
+ break;
+ }
+
+ WARN_ON(ret->data != NULL && ret->size == 0);
+
+ if (ret->error != MVE_BASE_ERROR_NONE)
+ {
+ enum mve_rsrc_log_severity severity = MVE_LOG_WARNING;
+
+ if ((ret->error == MVE_BASE_ERROR_TIMEOUT) || (ret->error == MVE_BASE_ERROR_NOT_READY))
+ {
+ severity = MVE_LOG_VERBOSE;
+ }
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ severity,
+ session,
+ "Ioctl failed. command=\"%s\", error=%u.",
+ mve_command_to_string(header->cmd),
+ ret->error);
+ }
+
+ return ret;
+}
diff --git a/drivers/video/arm/v5xx/base/mve_command.h b/drivers/video/arm/v5xx/base/mve_command.h
new file mode 100644
index 000000000000..39efdd801a6f
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_command.h
@@ -0,0 +1,50 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_COMMAND_H
+#define MVE_COMMAND_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+#include "mve_base.h"
+#include "mve_ioctl.h"
+
+/**
+ * This struct is used to store the result of an executed command.
+ */
+struct mve_response
+{
+ uint32_t error; /**< MVE error code */
+ uint32_t firmware_error; /**< Firmware error code */
+ int size; /**< Size of the supplied data */
+ void *data; /**< Pointer to the data. The client must free this member! */
+};
+
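+/* Note (illustrative): both the response object and its data member come
+ * from the driver's resource cache, so a caller is expected to release them
+ * the same way once the payload has been consumed, e.g.
+ *
+ *     MVE_RSRC_MEM_CACHE_FREE(rsp->data, rsp->size);
+ *     MVE_RSRC_MEM_CACHE_FREE(rsp, sizeof(struct mve_response));
+ */
+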
+/**
+ * Execute the command specified by the header and the associated data.
+ * @param header Specifies which command to execute and the amount of data supplied.
+ * @param data Pointer to the data associated with the command.
+ * @param filep struct file * associated with the file descriptor used to communicate
+ * with the driver.
+ * @return Command response.
+ */
+struct mve_response *mve_command_execute(struct mve_base_command_header *header,
+ void *data,
+ struct file *filep);
+
+#endif /* MVE_COMMAND_H */
diff --git a/drivers/video/arm/v5xx/base/mve_driver.c b/drivers/video/arm/v5xx/base/mve_driver.c
new file mode 100644
index 000000000000..f9f69b65f678
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_driver.c
@@ -0,0 +1,288 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+
+#include "mve_driver.h"
+#include "mve_ioctl.h"
+#include "mve_command.h"
+#include "mve_fw.h"
+#include "mve_session.h"
+
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_register.h"
+
+#define MVE_DRIVER_NAME "mv500"
+
+/**
+ * @brief Track information about the driver device.
+ */
+struct mve_device
+{
+    struct cdev cdev;                /**< Character-device information for the kernel. */
+ struct class *mve_class; /**< Class-information for the kernel. */
+ struct device *dev; /**< Pointer to the device struct */
+ dev_t device;
+};
+
+struct device *mve_device;
+
+static long mve_driver_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd)
+ {
+ case MVE_BASE_COMMAND:
+ {
+ struct mve_base_command_header header;
+ void *data = NULL;
+ struct mve_response *result;
+ struct mve_base_response_header tmp;
+ struct mve_base_response_header *dst;
+
+ /* Get command header */
+ if (0 != copy_from_user(&header, (void __user *)arg, sizeof(struct mve_base_command_header)))
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "copy_from_user() failed to get the MVE_COMMAND header.");
+ return -EFAULT;
+ }
+
+ if (0 < header.size)
+ {
+ /* Fetch command data */
+ data = MVE_RSRC_MEM_CACHE_ALLOC(header.size, GFP_KERNEL);
+ if (NULL == data)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Failed to allocate memory.");
+ return -ENOMEM;
+ }
+
+ if (0 != copy_from_user(data,
+ (void __user *)(arg + sizeof(struct mve_base_command_header)),
+ header.size))
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "copy_from_user() failed to get MVE_COMMAND data.");
+ MVE_RSRC_MEM_CACHE_FREE(data, header.size);
+ return -EFAULT;
+ }
+ }
+ /* Process command */
+ result = mve_command_execute(&header, data, file);
+
+ if (NULL != data)
+ {
+ MVE_RSRC_MEM_CACHE_FREE(data, header.size);
+ data = NULL;
+ }
+
+ if (NULL != result)
+ {
+ tmp.error = result->error;
+ tmp.firmware_error = result->firmware_error;
+ tmp.size = result->size;
+ }
+ else
+ {
+ return -ENOMEM;
+ }
+
+            /* Copy response header to userspace */
+            dst = (struct mve_base_response_header *)arg;
+            if (copy_to_user((void __user *)dst, &tmp, sizeof(tmp)))
+            {
+                MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "copy_to_user() failed to copy MVE_COMMAND header.");
+                /* Avoid leaking the response on the error path */
+                if (NULL != result->data)
+                {
+                    MVE_RSRC_MEM_CACHE_FREE(result->data, result->size);
+                }
+                MVE_RSRC_MEM_CACHE_FREE(result, sizeof(struct mve_response));
+                return -EFAULT;
+            }
+
+ if (0 < result->size)
+ {
+ WARN_ON(NULL == result->data);
+
+                /* Copy response data to userspace */
+                if (copy_to_user((void __user *)&dst->data, result->data, result->size))
+                {
+                    MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "copy_to_user() failed to copy MVE_COMMAND data.");
+                    /* Avoid leaking the response on the error path */
+                    MVE_RSRC_MEM_CACHE_FREE(result->data, result->size);
+                    MVE_RSRC_MEM_CACHE_FREE(result, sizeof(struct mve_response));
+                    return -EFAULT;
+                }
+
+ MVE_RSRC_MEM_CACHE_FREE(result->data, result->size);
+ result->data = NULL;
+ }
+
+ MVE_RSRC_MEM_CACHE_FREE(result, sizeof(struct mve_response));
+ result = NULL;
+
+ break;
+ }
+ default:
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unknown ioctl. cmd=%u.", cmd);
+ break;
+ }
+ return 0;
+}
+
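+/* Illustrative sketch, not from the source: userspace packs a command header
+ * followed by its payload into one buffer and passes it to the
+ * MVE_BASE_COMMAND ioctl; the response header (and any trailing data) is
+ * written back over the same buffer. Assuming a buffer large enough for
+ * both directions, a hypothetical client call could look like:
+ *
+ *     struct mve_base_command_header *hdr = buf;
+ *     hdr->cmd = MVE_BASE_READ_HW_INFO;
+ *     hdr->size = 0;
+ *     if (0 == ioctl(fd, MVE_BASE_COMMAND, buf))
+ *     {
+ *         struct mve_base_response_header *rsp = buf;
+ *         // rsp->error, rsp->size and the data that follows are now valid
+ *     }
+ */
+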
+static int mve_driver_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int mve_driver_release(struct inode *inode, struct file *file)
+{
+ /* Cleanup all sessions that have been created using this fd but not yet been destroyed */
+ mve_session_cleanup_client(file);
+
+ return 0;
+}
+
+static unsigned int mve_driver_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+ return mve_session_poll(filp, poll_table);
+}
+
+static struct file_operations fops =
+{
+ .owner = THIS_MODULE,
+ .open = mve_driver_open,
+ .release = mve_driver_release,
+ .unlocked_ioctl = mve_driver_ioctl,
+#ifdef CONFIG_64BIT
+ .compat_ioctl = mve_driver_ioctl,
+#endif
+ .poll = mve_driver_poll,
+};
+
+static int mve_driver_probe(struct platform_device *pdev)
+{
+ struct mve_device *mdev;
+ int err;
+
+ /* Save pointer to platform device. */
+ mve_device = &pdev->dev;
+
+    /* Allocate MVE device. MVE_RSRC_MEM_ZALLOC returns zeroed memory, so no
+     * separate memset is needed. */
+    mdev = MVE_RSRC_MEM_ZALLOC(sizeof(*mdev), GFP_KERNEL);
+    if (NULL == mdev)
+    {
+        return -ENOMEM;
+    }
+
+    /* Register a range of char device numbers. */
+    err = alloc_chrdev_region(&mdev->device, 0, 1, MVE_DRIVER_NAME);
+    if (0 != err)
+    {
+        MVE_RSRC_MEM_FREE(mdev);
+        return err;
+    }
+
+    /* Initialize our char dev data. */
+    cdev_init(&mdev->cdev, &fops);
+    mdev->cdev.owner = THIS_MODULE;
+
+    /* Register char dev with the kernel. */
+    err = cdev_add(&mdev->cdev, mdev->device, 1);
+    if (0 != err)
+    {
+        unregister_chrdev_region(mdev->device, 1);
+        MVE_RSRC_MEM_FREE(mdev);
+        return err;
+    }
+
+ /* Create class for device driver. */
+ mdev->mve_class = class_create(THIS_MODULE, MVE_DRIVER_NAME);
+
+ /* Create a device node. */
+ mdev->dev = device_create(mdev->mve_class, NULL, mdev->device, NULL, MVE_DRIVER_NAME);
+
+ platform_set_drvdata(pdev, mdev);
+
+ /* Initialize session module. */
+ mve_session_init(&pdev->dev);
+
+ /* Initialize firmware module. */
+ mve_fw_init();
+
+ printk("MVE base driver loaded successfully\n");
+
+ return 0;
+}
+
+static int mve_driver_remove(struct platform_device *pdev)
+{
+ struct mve_device *mdev = platform_get_drvdata(pdev);
+ dev_t dev = MKDEV(MAJOR(mdev->device), 0);
+
+ device_destroy(mdev->mve_class, dev);
+ class_destroy(mdev->mve_class);
+
+ /* Unregister char device. */
+ cdev_del(&mdev->cdev);
+
+ /* Free major. */
+ unregister_chrdev_region(dev, 1);
+
+ /* Deinitialize session. */
+ mve_session_deinit(&pdev->dev);
+
+ /* Free device structure. */
+ MVE_RSRC_MEM_FREE(mdev);
+
+ printk("MVE base driver unloaded successfully\n");
+
+ return 0;
+}
+
+static struct platform_driver mv500_driver =
+{
+ .probe = mve_driver_probe,
+ .remove = mve_driver_remove,
+
+ .driver = {
+ .name = MVE_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct resource mve_resources[] =
+{};
+
+static void mve_device_release(struct device *dev)
+{}
+
+static struct platform_device mve_platform_device =
+{
+ .name = "mv500",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mve_resources),
+ .resource = mve_resources,
+ .dev = {
+ .platform_data = NULL,
+ .release = mve_device_release,
+ },
+};
+
+static int __init mve_driver_init(void)
+{
+ platform_driver_register(&mv500_driver);
+ platform_device_register(&mve_platform_device);
+
+ return 0;
+}
+
+static void __exit mve_driver_exit(void)
+{
+ platform_driver_unregister(&mv500_driver);
+ platform_device_unregister(&mve_platform_device);
+
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "MVE base driver unregistered");
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Mali-V500 video engine driver");
+
+module_init(mve_driver_init);
+module_exit(mve_driver_exit);
diff --git a/drivers/video/arm/v5xx/base/mve_driver.h b/drivers/video/arm/v5xx/base/mve_driver.h
new file mode 100644
index 000000000000..74974ad73405
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_driver.h
@@ -0,0 +1,31 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_DRIVER_H
+#define MVE_DRIVER_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/platform_device.h>
+#endif
+
+#define NELEMS(a) (sizeof(a) / sizeof((a)[0]))
+
+/**
+ * Pointer to platform device structure.
+ */
+extern struct device *mve_device;
+
+#endif
diff --git a/drivers/video/arm/v5xx/base/mve_fw.c b/drivers/video/arm/v5xx/base/mve_fw.c
new file mode 100644
index 000000000000..b0eaa2c76590
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_fw.c
@@ -0,0 +1,1011 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#endif
+
+#include "mve_fw.h"
+#include "mve_driver.h"
+#include "mve_mmu.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_log_ram.h"
+#include "mve_rsrc_mem_frontend.h"
+
+#define MAX_STRINGNAME_SIZE 128
+
+#ifndef CSTD_UNUSED
+#define CSTD_UNUSED(x) ((void)(x))
+#endif
+
+/**
+ * Firmware binary header.
+ */
+struct fw_header
+{
+    /** The RASC instruction for a jump to the "real" firmware (so we
+     * can always start executing from the first address). */
+ uint32_t rasc_jmp;
+
+ /** Host interface protocol version. */
+ uint8_t protocol_minor;
+ uint8_t protocol_major;
+
+ /** Reserved for future use. Always 0. */
+ uint8_t reserved[2];
+
+ /** Human readable codec information. */
+ uint8_t info_string[56];
+
+ /** Part number. */
+ uint8_t part_number[8];
+
+ /** SVN revision */
+ uint8_t svn_revision[8];
+
+ /** Firmware version. */
+ uint8_t version_string[16];
+
+ /** Length of the read-only part of the firmware. */
+ uint32_t text_length;
+
+ /** Start address for BSS segment. This is always page-aligned. */
+ uint32_t bss_start_address;
+
+ /** How to allocate pages for BSS segment is specified by a bitmap.
+ * The encoding of this is as for the "mem_map" in the protocol,
+ * as if you call it with:
+ * mem_map(bss_start_address, bss_bitmap_size, bss_bitmap_size,
+ * bss_bitmap); */
+ uint32_t bss_bitmap_size;
+    /** Page allocation bitmap for the BSS segment; see bss_bitmap_size
+     * above for the encoding. */
+ uint32_t bss_bitmap[16];
+
+ /** Defines a region of shared pages */
+ uint32_t master_rw_start_address;
+ /** Defines a region of shared pages */
+ uint32_t master_rw_size;
+};
+
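+/* Worked example (illustrative, assuming 4 kB MMU pages): the BSS bitmap
+ * marks which pages, counted from bss_start_address, need backing. Bit j of
+ * the bitmap corresponds to the page at
+ * bss_start_address + (j << MVE_MMU_PAGE_SHIFT), so a hedged decode loop
+ * would read:
+ *
+ *     for (j = 0; j < header->bss_bitmap_size; j++)
+ *     {
+ *         if (header->bss_bitmap[j >> 5] & (1u << (j & 0x1f)))
+ *         {
+ *             // a BSS page must be provided at this firmware address
+ *         }
+ *     }
+ *
+ * construct_fw_page_table() below performs this walk, additionally carving
+ * the master_rw window out as shared pages.
+ */
+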
+/**
+ * Firmware container. It contains the actual firmware binary and other
+ * data that make the firmware instantiation and removal easier. These
+ * objects are reference counted. All data stored in this structure
+ * can be shared between firmware instances.
+ */
+struct mve_firmware_descriptor
+{
+ uint32_t num_pages; /**< Size of the firmware binary */
+ phys_addr_t *data; /**< Pointer to firmware binary pages */
+
+ mve_mmu_entry_t *mmu_entries; /**< Premade MMU entries for the firmware binary */
+ uint32_t num_text_pages; /**< Number of firmware text pages */
+ uint32_t num_shared_pages; /**< Number of firmware pages that can be shared */
+ uint32_t num_bss_pages; /**< Number of firmware pages that cannot be shared */
+
+ struct mve_base_fw_version fw_version; /**< FW version */
+ struct kref refcount; /**< Reference counter */
+};
+
+/**
+ * Structure used to store firmware instance specific data. Much of the data in
+ * this structure is specific to a certain instance.
+ */
+struct mve_fw_instance
+{
+ char *role; /**< OMX role */
+ int ncores; /**< Number of cores used by this instance */
+ bool secure; /**< Secure firmware instance, l2 page tables created in secure OS */
+ struct mve_firmware_descriptor *desc; /**< Pointer to the firmware descriptor */
+
+ phys_addr_t *shared_pages; /**< Array of shared BSS pages allocated for this instance */
+ uint32_t used_shared_pages; /**< Number of used shared BSS pages */
+
+ phys_addr_t *bss_pages; /**< Array of non-shared BSS pages allocated for this instance */
+ uint32_t used_bss_pages; /**< Number of used non-shared BSS pages */
+ struct mve_base_fw_version fw_version; /**< FW version */
+};
+
+/**
+ * Instances of this structure are used to represent cached firmware instances.
+ */
+struct firmware_cache_entry
+{
+ const char *role; /**< OMX role */
+ const char *filename; /**< Firmware filename */
+ struct mve_firmware_descriptor *desc; /**< Pointer to the cached firmware descriptor. NULL
+ * if no such descriptor exists yet. */
+};
+
+/**
+ * Preinitialized firmware cache. Maps OMX roles to firmware binary files.
+ */
+static struct firmware_cache_entry firmware_cache[] =
+{
+ {
+ "video_decoder.avc", "h264dec.fwb", NULL
+ },
+ {
+ "video_encoder.avc", "h264enc.fwb", NULL
+ },
+ {
+ "video_decoder.hevc", "hevcdec.fwb", NULL
+ },
+ {
+ "video_encoder.hevc", "hevcenc.fwb", NULL
+ },
+ {
+ "video_decoder.h264", "h264dec.fwb", NULL
+ },
+ {
+ "video_decoder.vp8", "vp8dec.fwb", NULL
+ },
+ {
+ "video_encoder.vp8", "vp8enc.fwb", NULL
+ },
+ {
+ "video_decoder.vp9", "vp9dec.fwb", NULL
+ },
+ {
+ "video_encoder.vp9", "vp9enc.fwb", NULL
+ },
+ {
+ "video_decoder.rv", "rvdec.fwb", NULL
+ },
+ {
+ "video_decoder.mpeg2", "mpeg2dec.fwb", NULL
+ },
+ {
+ "video_decoder.mpeg4", "mpeg4dec.fwb", NULL
+ },
+ {
+ "video_decoder.h263", "mpeg4dec.fwb", NULL
+ },
+ {
+ "video_decoder.vc1", "vc1dec.fwb", NULL
+ },
+ {
+ "video_encoder.jpeg", "jpegenc.fwb", NULL
+ },
+ {
+ "video_decoder.jpeg", "jpegdec.fwb", NULL
+ }
+};
+
+/* Mutex used to protect the firmware cache from concurrent access */
+static struct semaphore fw_cache_mutex;
+
+/**
+ * Construct a page entry for the firmware L2 lookup table.
+ * @param page The physical address of the page.
+ * @param type Type of firmware page (text, bss, shared).
+ * @return L2 page entry for the firmware lookup table.
+ */
+static mve_mmu_entry_t construct_fw_l2_entry(phys_addr_t page, uint32_t type)
+{
+ return (mve_mmu_entry_t)(((page >> (MVE_MMU_PAGE_SHIFT - FW_PHYSADDR_SHIFT)) & FW_PHYSADDR_MASK) |
+ (type & FW_PAGETYPE_MASK));
+}
+
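+/* Worked example (illustrative, assuming MVE_MMU_PAGE_SHIFT is 12): a text
+ * page at physical address 0x80001000 packs as
+ *
+ *     0x80001000 >> (12 - 2) = 0x00200004
+ *     0x00200004 | FW_PAGETYPE_TEXT = 0x00200005
+ *
+ * and FW_GET_PHADDR(0x00200005) shifts the masked value back up by 10 bits,
+ * recovering 0x80001000. The low two bits of an entry always hold the page
+ * type and the upper 30 bits the page frame.
+ */
+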
+void mve_fw_init(void)
+{
+ static int count = 0;
+
+ if (0 == count)
+ {
+ sema_init(&fw_cache_mutex, 1);
+ count = 1;
+ }
+}
+
+/**
+ * Loads a binary firmware and creates a firmware descriptor. The client
+ * is responsible for freeing the descriptor memory when no longer needed.
+ * @param filename Firmware binary filename.
+ * @return A firmware descriptor on success, NULL on failure.
+ */
+static struct mve_firmware_descriptor *load_firmware(const char *filename)
+{
+ const struct firmware *fw = NULL;
+ struct mve_firmware_descriptor *desc = NULL;
+ uint32_t num_pages = 0;
+ int ret;
+ uint32_t bytes_left;
+ int i;
+
+ desc = MVE_RSRC_MEM_ZALLOC(sizeof(struct mve_firmware_descriptor), GFP_KERNEL);
+ if (NULL == desc)
+ {
+ return NULL;
+ }
+
+ ret = request_firmware(&fw, filename, mve_device);
+ if (0 > ret)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to load firmware file. file=%s.", filename);
+ goto error;
+ }
+
+ /* Allocate memory for the firmware binary */
+ num_pages = (fw->size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+ desc->data = MVE_RSRC_MEM_ALLOC_PAGES(num_pages);
+ if (NULL == desc->data)
+ {
+ goto error;
+ }
+
+ /* Copy the firmware to the physical pages */
+ bytes_left = fw->size;
+ i = 0;
+ while (0 < bytes_left)
+ {
+ uint32_t size = min(bytes_left, (uint32_t)MVE_MMU_PAGE_SIZE);
+ void *dst = mve_rsrc_mem_cpu_map_page(desc->data[i]);
+
+ if (NULL == dst)
+ {
+ goto error;
+ }
+
+ memcpy(dst, fw->data + i * MVE_MMU_PAGE_SIZE, size);
+ mve_rsrc_mem_cpu_unmap_page(desc->data[i]);
+ mve_rsrc_mem_clean_cache_range(desc->data[i], PAGE_SIZE);
+ i++;
+ bytes_left -= size;
+ }
+
+ desc->num_pages = num_pages;
+ kref_init(&desc->refcount);
+
+ release_firmware(fw);
+
+ return desc;
+
+error:
+ if (NULL != desc)
+ {
+ if (NULL != desc->data)
+ {
+ MVE_RSRC_MEM_FREE_PAGES(desc->data, desc->num_pages);
+ desc->data = NULL;
+ }
+ MVE_RSRC_MEM_FREE(desc);
+ }
+ if (NULL != fw)
+ {
+ release_firmware(fw);
+ }
+
+ return NULL;
+}
+
+/**
+ * Parses the firmware binary header and allocates text pages. This function
+ * also calculates data needed for creating BSS pages later in the firmware
+ * loading process.
+ * @param desc The firmware descriptor.
+ * @return True on success, false on failure.
+ */
+static bool construct_fw_page_table(struct mve_firmware_descriptor *desc)
+{
+ uint32_t i, j;
+ mve_mmu_entry_t *pages = NULL;
+ uint32_t no_text_pages, no_shared_pages = 0, no_bss_pages = 0;
+ struct fw_header *header;
+ bool ret = false;
+
+ /* The first section of the firmware is a struct fw_header entry */
+ header = (struct fw_header *)mve_rsrc_mem_cpu_map_page(desc->data[0]);
+ if (NULL == header)
+ {
+ return false;
+ }
+
+    /* Verify that the firmware fits in one L2 MMU page (i.e. 4 MByte). This
+     * is a software restriction, not a hardware one; firmware binaries are
+     * not expected to be larger than 4 MByte. */
+ if (header->bss_bitmap_size >= (MVE_MMU_PAGE_SIZE / sizeof(uint32_t)) ||
+ ((header->bss_start_address >> MVE_MMU_PAGE_SHIFT) + header->bss_bitmap_size >=
+ (MVE_MMU_PAGE_SIZE / sizeof(uint32_t))))
+ {
+ WARN_ON(true);
+ goto out;
+ }
+
+ /* Allocate one L2 page table. It will be used to store references
+ * to all text pages (shared with all instances). BSS pages will not be
+ * allocated here but when the firmware is instantiated for a given session.
+ * This is because BSS pages are session private pages. */
+ pages = MVE_RSRC_MEM_ZALLOC(MVE_MMU_PAGE_SIZE, GFP_KERNEL);
+ if (NULL == pages)
+ {
+ goto out;
+ }
+
+ /* Text pages can be shared between all cores and all sessions running the
+ * same firmware. */
+ no_text_pages = (header->text_length + MVE_MMU_PAGE_SIZE - 1) / MVE_MMU_PAGE_SIZE;
+
+ /* Process text pages */
+ for (i = 0; i < no_text_pages; ++i)
+ {
+ /* Do not use the first entry in the L2 lookup table. If the 1st page
+ * was used, then the firmware would have to read address 0x0 (NULL)
+ * to fetch the jump instruction to the start of the firmware. */
+ pages[i + 1] = construct_fw_l2_entry(desc->data[i], FW_PAGETYPE_TEXT);
+ }
+
+ /* Process BSS pages */
+ i = header->bss_start_address >> MVE_MMU_PAGE_SHIFT;
+ for (j = 0; j < header->bss_bitmap_size; j++)
+ {
+ uint32_t word_idx = j >> 5;
+ uint32_t bit_idx = j & 0x1f;
+ uint32_t addr = i << MVE_MMU_PAGE_SHIFT;
+
+ /* Mark this page as either a BSS page or a shared page */
+ if (addr >= header->master_rw_start_address &&
+ addr < header->master_rw_start_address + header->master_rw_size)
+ {
+ /* Shared pages can be shared between all cores running the same session. */
+ pages[i] = construct_fw_l2_entry(0, FW_PAGETYPE_BSS_SHARED);
+ no_shared_pages++;
+ }
+ else if ((header->bss_bitmap[word_idx] & (1 << bit_idx)) != 0)
+ {
+ /* Non-shared BSS pages. These pages need to be allocated for each core
+ * and session. */
+ pages[i] = construct_fw_l2_entry(0, FW_PAGETYPE_BSS);
+ no_bss_pages++;
+ }
+ i++;
+ }
+    /* Fill in bookkeeping data that is needed when the firmware is to be instantiated */
+ desc->mmu_entries = pages;
+ desc->num_text_pages = no_text_pages;
+ desc->num_shared_pages = no_shared_pages;
+ desc->num_bss_pages = no_bss_pages;
+
+ desc->fw_version.major = header->protocol_major;
+ desc->fw_version.minor = header->protocol_minor;
+
+ if (MVE_FW_PROTOCOL_VERSION_1_0 == desc->fw_version.major - 1)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "Detected firmware version v1. major=%u, minor=%u.", desc->fw_version.major, desc->fw_version.minor);
+ }
+ else if (MVE_FW_PROTOCOL_VERSION_2_0 == desc->fw_version.major - 1)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "Detected firmware version v2. major=%u, minor=%u.", desc->fw_version.major, desc->fw_version.minor);
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unknown firmware version. major=%u, minor=%u.", desc->fw_version.major, desc->fw_version.minor);
+ desc->fw_version.major = MVE_FW_PROTOCOL_VERSION_UNKNOWN;
+ }
+ ret = true;
+
+out:
+ mve_rsrc_mem_cpu_unmap_page(desc->data[0]);
+
+ return ret;
+}
+
+/**
+ * Release function for a mve_firmware_descriptor. Called by kref_put
+ * when the reference counter reaches zero.
+ * @param ref Pointer to the kref member of the mve_firmware_descriptor structure.
+ */
+static void release_fw_descriptor(struct kref *ref)
+{
+ unsigned int i;
+
+ struct mve_firmware_descriptor *desc = container_of(ref,
+ struct mve_firmware_descriptor,
+ refcount);
+
+ /* Remove the firmware descriptor from the cache */
+ for (i = 0; i < NELEMS(firmware_cache); ++i)
+ {
+ if (desc == firmware_cache[i].desc)
+ {
+ struct firmware_cache_entry *entry = &firmware_cache[i];
+ entry->desc = NULL;
+ break;
+ }
+ }
+
+ MVE_RSRC_MEM_FREE_PAGES(desc->data, desc->num_pages);
+ desc->data = NULL;
+
+ MVE_RSRC_MEM_FREE(desc->mmu_entries);
+ MVE_RSRC_MEM_FREE(desc);
+}
+
+/**
+ * Get the firmware descriptor for a given OMX role. The corresponding
+ * firmware is loaded if it's not present in the firmware cache.
+ * @param role An OMX role string.
+ * @return The firmware descriptor on success, NULL on failure.
+ */
+static struct mve_firmware_descriptor *get_fw_descriptor(const char *role)
+{
+ unsigned int i;
+ struct firmware_cache_entry *entry = NULL;
+ struct mve_firmware_descriptor *ret = NULL;
+
+ /* Find which firmware to load based on the OMX role */
+ for (i = 0; i < NELEMS(firmware_cache); ++i)
+ {
+ if (0 == strncmp(role, firmware_cache[i].role, strlen(firmware_cache[i].role)))
+ {
+ entry = &firmware_cache[i];
+ break;
+ }
+ }
+
+ if (NULL == entry)
+ {
+ /* Unsupported OMX role */
+ goto out;
+ }
+
+ if (NULL == entry->desc)
+ {
+        bool res;
+
+ /* This firmware has not been loaded previously */
+ entry->desc = load_firmware(entry->filename);
+ if (NULL == entry->desc)
+ {
+ goto out;
+ }
+
+ /* Create all text pages and mark bss pages */
+        res = construct_fw_page_table(entry->desc);
+        if (false == res)
+ {
+ kref_put(&entry->desc->refcount, release_fw_descriptor);
+ }
+ }
+ else
+ {
+ /* Firmware has already been loaded. Prevent it from being unloaded
+ * by increasing the reference counter. */
+ kref_get(&entry->desc->refcount);
+ }
+
+ ret = entry->desc;
+out:
+ return ret;
+}
+
+/**
+ * Create and map a consecutive chunk of bss pages.
+ * @param idx Index in the reference page list of the starting position
+ * @param mmu_entries Pointer to a list that maps out the different FW pages. It
+ * tells e.g. where the FW expects BSS pages.
+ * @param dst_page The destination L2 MMU page
+ * @param first_core Indicating whether the function builds L2 entries for the first core
+ * @param inst The firmware instance
+ * @return Index of the next page that isn't part of the consecutive BSS page chunk.
+ */
+static int add_bss_range(int idx,
+ const mve_mmu_entry_t *mmu_entries,
+ phys_addr_t dst_page,
+ bool first_core,
+ struct mve_fw_instance *inst)
+{
+ unsigned int i, j;
+ unsigned int first_bss_idx, last_bss_idx, nof_bss_pages;
+ phys_addr_t *bss_pages = NULL;
+
+ mve_mmu_entry_t ref_line = mmu_entries[idx];
+ uint32_t page_type = FW_GET_TYPE(ref_line);
+ if ((page_type != FW_PAGETYPE_BSS) &&
+ (page_type != FW_PAGETYPE_BSS_SHARED))
+ {
+ /* This is not a BSS page. */
+ return idx;
+ }
+
+ /* Count number of consecutive pages of same type */
+ first_bss_idx = idx;
+ for (i = idx + 1; FW_GET_TYPE(mmu_entries[i]) == page_type; i++)
+ {
+ /* Do nothing. */
+ }
+ last_bss_idx = i - 1;
+ nof_bss_pages = last_bss_idx - first_bss_idx + 1;
+
+ if (true == first_core || FW_PAGETYPE_BSS == page_type)
+ {
+ /* Find free pages from the preallocated pages list */
+ if (FW_PAGETYPE_BSS_SHARED == page_type)
+ {
+ bss_pages = &inst->shared_pages[inst->used_shared_pages];
+ inst->used_shared_pages += nof_bss_pages;
+ }
+ else
+ {
+ bss_pages = &inst->bss_pages[inst->used_bss_pages];
+ inst->used_bss_pages += nof_bss_pages;
+ }
+
+ for (j = first_bss_idx; j <= last_bss_idx; j++)
+ {
+ phys_addr_t page = bss_pages[j - first_bss_idx];
+ mve_mmu_entry_t entry;
+
+ entry = mve_mmu_make_l1l2_entry(ATTRIB_PRIVATE, page, ACCESS_READ_WRITE);
+ mve_rsrc_mem_write32(dst_page + MVE_MMU_PAGE_TABLE_ENTRY_SIZE * j, entry);
+ }
+ }
+
+ return i;
+}
+
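+/* Worked example (illustrative): given a reference line-up such as
+ *
+ *     idx:   ...  8    9    10   11          12
+ *     type:  ...  BSS  BSS  BSS  BSS_SHARED  INVALID
+ *
+ * add_bss_range(8, ...) maps three private pages for entries 8..10 and
+ * returns 11, the first index outside the consecutive chunk of that type.
+ */
+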
+/**
+ * Loads a firmware binary and sets up the session's MMU table.
+ * @param ctx The session's MMU context.
+ * @param role OMX role. This is used to decide which firmware to load.
+ * @param ncores Number of video cores this session will use.
+ * @return A firmware instance on success, NULL on failure.
+ */
+static struct mve_fw_instance *mve_fw_load_insecure(struct mve_mmu_ctx *ctx,
+ const char *role,
+ int ncores)
+{
+ unsigned int i;
+ struct mve_firmware_descriptor *desc = NULL;
+ struct mve_fw_instance *inst = NULL;
+ bool first_core = true;
+ phys_addr_t l2page;
+ unsigned int ncores_setup = 0;
+ int sem_taken = -1;
+
+ enum mve_mem_virt_region_type fw_regions[] =
+ {
+ VIRT_MEM_REGION_FIRMWARE0,
+ VIRT_MEM_REGION_FIRMWARE1,
+ VIRT_MEM_REGION_FIRMWARE2,
+ VIRT_MEM_REGION_FIRMWARE3,
+ VIRT_MEM_REGION_FIRMWARE4,
+ VIRT_MEM_REGION_FIRMWARE5,
+ VIRT_MEM_REGION_FIRMWARE6,
+ VIRT_MEM_REGION_FIRMWARE7,
+ };
+
+ if (NULL == ctx || NULL == role)
+ {
+ return NULL;
+ }
+
+ /* Allocate L2 MMU page */
+ l2page = MVE_RSRC_MEM_ALLOC_PAGE();
+ if (0 == l2page)
+ {
+ return NULL;
+ }
+
+ sem_taken = down_interruptible(&fw_cache_mutex);
+ if (0 != sem_taken)
+ {
+ goto error;
+ }
+
+ desc = get_fw_descriptor(role);
+ if (NULL == desc)
+ {
+ /* Unable to load firmware */
+ goto error;
+ }
+
+ inst = MVE_RSRC_MEM_ZALLOC(sizeof(struct mve_fw_instance), GFP_KERNEL);
+ if (NULL == inst)
+ {
+ goto error;
+ }
+
+ inst->fw_version = desc->fw_version;
+
+ inst->role = MVE_RSRC_MEM_ZALLOC(sizeof(unsigned char) * MAX_STRINGNAME_SIZE, GFP_KERNEL);
+ if (NULL == inst->role)
+ {
+ goto error;
+ }
+    strncpy(inst->role, role, MAX_STRINGNAME_SIZE - 1); /* buffer is zeroed; keep last byte as NUL */
+
+ /* Allocate BSS pages. Since shared BSS pages can be shared between all cores
+ * used in a session, only desc->num_shared_pages are needed. However,
+ * non-shared BSS pages are core private which means that desc->num_bss_pages * ncores
+ * are needed. */
+ inst->shared_pages = MVE_RSRC_MEM_ALLOC_PAGES(desc->num_shared_pages);
+ inst->bss_pages = MVE_RSRC_MEM_ALLOC_PAGES(desc->num_bss_pages * ncores);
+
+ if (NULL == inst->shared_pages || NULL == inst->bss_pages)
+ {
+ goto error;
+ }
+
+ /* Setup the MMU table for each core */
+ for (i = 0; i < (unsigned int)ncores; ++i)
+ {
+ struct mve_mem_virt_region region;
+ bool ret;
+ unsigned int j;
+
+ mve_mem_virt_region_get(desc->fw_version.major - 1, fw_regions[i], &region);
+
+ /* No need to process text pages again since we reuse the previous L2 page */
+ if (true == first_core)
+ {
+ /* Add text pages */
+ for (j = 0; j < (MVE_MMU_PAGE_SIZE / sizeof(mve_mmu_entry_t)); j++)
+ {
+ mve_mmu_entry_t fw_mmu_page_line = desc->mmu_entries[j];
+ if (FW_GET_TYPE(fw_mmu_page_line) == FW_PAGETYPE_TEXT)
+ {
+ phys_addr_t paddr = FW_GET_PHADDR(fw_mmu_page_line);
+ mve_mmu_entry_t entry = mve_mmu_make_l1l2_entry(ATTRIB_PRIVATE, paddr, ACCESS_EXECUTABLE);
+ mve_rsrc_mem_write32(l2page + 4 * j, entry);
+ }
+ }
+ }
+
+ /* Add BSS pages */
+ for (j = 0; j < (MVE_MMU_PAGE_SIZE / sizeof(mve_mmu_entry_t)); j++)
+ {
+ j = add_bss_range(j, desc->mmu_entries, l2page, first_core, inst);
+ if ((unsigned int)-1 == j)
+ {
+ goto error;
+ }
+ }
+
+ /* Merge L2 page to MMU table */
+ ret = mve_mmu_map_pages_merge(ctx,
+ l2page,
+ region.start,
+ MVE_MMU_PAGE_SIZE / sizeof(uint32_t));
+ if (false == ret)
+ {
+ goto error;
+ }
+
+        /* When the first core has been set up, do not process text pages
+         * for the other cores since the text pages can be reused. */
+ first_core = false;
+ ncores_setup++;
+ }
+
+ MVE_RSRC_MEM_FREE_PAGE(l2page);
+
+ inst->ncores = ncores;
+ inst->desc = desc;
+
+ up(&fw_cache_mutex);
+
+ return inst;
+
+error:
+ if (0 != l2page)
+ {
+ MVE_RSRC_MEM_FREE_PAGE(l2page);
+ }
+ if (NULL != desc)
+ {
+ if (NULL != inst)
+ {
+ MVE_RSRC_MEM_FREE_PAGES(inst->shared_pages, desc->num_shared_pages);
+ MVE_RSRC_MEM_FREE_PAGES(inst->bss_pages, desc->num_bss_pages * ncores);
+
+ inst->shared_pages = NULL;
+ inst->bss_pages = NULL;
+
+ if (NULL != inst->role)
+ {
+ MVE_RSRC_MEM_FREE(inst->role);
+ }
+ MVE_RSRC_MEM_FREE(inst);
+ }
+
+        /* Unmap the firmware from all cores that have already been set up */
+ for (i = 0; i < ncores_setup; ++i)
+ {
+ struct mve_mem_virt_region region;
+
+ mve_mem_virt_region_get(desc->fw_version.major - 1, fw_regions[i], &region);
+ mve_mmu_unmap_pages(ctx, region.start);
+ }
+
+ kref_put(&desc->refcount, release_fw_descriptor);
+ }
+
+ up(&fw_cache_mutex);
+
+ return NULL;
+}
+
+/**
+ * Sets up the session's MMU table from L2 pages pre-created by the secure OS.
+ * @param ctx The session's MMU context.
+ * @param fw_secure_desc Contains fw version and l2 page tables address for secure sessions.
+ * @param ncores Number of video cores this session will use.
+ * @return A firmware instance on success, NULL on failure.
+ */
+static struct mve_fw_instance *mve_fw_load_secure(struct mve_mmu_ctx *ctx,
+ struct mve_base_fw_secure_descriptor *fw_secure_desc,
+ int ncores)
+{
+ struct mve_fw_instance *inst = NULL;
+ uint32_t i;
+ phys_addr_t l2page;
+ phys_addr_t l2pages = fw_secure_desc->l2pages;
+
+ enum mve_mem_virt_region_type fw_regions[] =
+ {
+ VIRT_MEM_REGION_FIRMWARE0,
+ VIRT_MEM_REGION_FIRMWARE1,
+ VIRT_MEM_REGION_FIRMWARE2,
+ VIRT_MEM_REGION_FIRMWARE3,
+ VIRT_MEM_REGION_FIRMWARE4,
+ VIRT_MEM_REGION_FIRMWARE5,
+ VIRT_MEM_REGION_FIRMWARE6,
+ VIRT_MEM_REGION_FIRMWARE7,
+ };
+
+ if (NULL == ctx)
+ {
+ return NULL;
+ }
+
+ inst = MVE_RSRC_MEM_ZALLOC(sizeof(struct mve_fw_instance), GFP_KERNEL);
+ if (NULL == inst)
+ {
+ goto error;
+ }
+ inst->secure = true;
+ inst->fw_version = fw_secure_desc->fw_version;
+
+ for (i = 0; i < (unsigned int)ncores; ++i)
+ {
+ bool ret;
+ struct mve_mem_virt_region region;
+ mve_mem_virt_region_get(inst->fw_version.major - 1, fw_regions[i], &region);
+
+ l2page = l2pages + (i * MVE_MMU_PAGE_SIZE);
+
+ ret = mve_mmu_map_page_replace(ctx,
+ l2page,
+ region.start);
+ if (false == ret)
+ {
+ goto error;
+ }
+ }
+ inst->ncores = ncores;
+ return inst;
+
+error:
+ if (NULL != inst)
+ {
+ MVE_RSRC_MEM_FREE(inst);
+ }
+ return NULL;
+}
+
+/**
+ * Entry point for loading a firmware binary and setting up the session's MMU table.
+ * @param ctx The session's MMU context.
+ * @param fw_secure_desc Contains fw version and l2 page tables address for secure sessions.
+ * @param role OMX role. This is used to decide which firmware to load.
+ * @param ncores Number of video cores this session will use.
+ * @return A firmware instance on success, NULL on failure.
+ */
+struct mve_fw_instance *mve_fw_load(struct mve_mmu_ctx *ctx,
+ struct mve_base_fw_secure_descriptor *fw_secure_desc,
+ const char *role,
+ int ncores)
+{
+ if (NULL == fw_secure_desc)
+ {
+ return mve_fw_load_insecure(ctx, role, ncores);
+ }
+ else
+ {
+ return mve_fw_load_secure(ctx, fw_secure_desc, ncores);
+ }
+}
+
+bool mve_fw_unload(struct mve_mmu_ctx *ctx, struct mve_fw_instance *inst)
+{
+ int i;
+ int sem_taken = -1;
+
+ enum mve_mem_virt_region_type fw_regions[] =
+ {
+ VIRT_MEM_REGION_FIRMWARE0,
+ VIRT_MEM_REGION_FIRMWARE1,
+ VIRT_MEM_REGION_FIRMWARE2,
+ VIRT_MEM_REGION_FIRMWARE3,
+ VIRT_MEM_REGION_FIRMWARE4,
+ VIRT_MEM_REGION_FIRMWARE5,
+ VIRT_MEM_REGION_FIRMWARE6,
+ VIRT_MEM_REGION_FIRMWARE7,
+ };
+
+ if (NULL == ctx || NULL == inst)
+ {
+ return false;
+ }
+
+ if (false == inst->secure)
+ {
+ sem_taken = down_interruptible(&fw_cache_mutex);
+        /* Continue even if the semaphore could not be taken */
+
+ /* Unmap all firmware instances */
+ for (i = 0; i < inst->ncores; ++i)
+ {
+ struct mve_mem_virt_region region;
+
+ mve_mem_virt_region_get(inst->desc->fw_version.major - 1, fw_regions[i], &region);
+ mve_mmu_unmap_pages(ctx, region.start);
+ }
+ /* The text pages will be removed when the firmware descriptor is
+ * freed in release_fw_descriptor. */
+ MVE_RSRC_MEM_FREE_PAGES(inst->shared_pages, inst->desc->num_shared_pages);
+ MVE_RSRC_MEM_FREE_PAGES(inst->bss_pages, inst->desc->num_bss_pages * inst->ncores);
+ MVE_RSRC_MEM_FREE(inst->role);
+
+ inst->shared_pages = NULL;
+ inst->bss_pages = NULL;
+ inst->role = NULL;
+
+ kref_put(&inst->desc->refcount, release_fw_descriptor);
+ }
+
+ MVE_RSRC_MEM_FREE(inst);
+
+ if (0 == sem_taken)
+ {
+ up(&fw_cache_mutex);
+ }
+
+ return true;
+}
+
+struct mve_base_fw_version *mve_fw_get_version(struct mve_fw_instance *inst)
+{
+ if (NULL == inst)
+ {
+ return NULL;
+ }
+
+ return &inst->fw_version;
+}
+
+bool mve_fw_secure(struct mve_fw_instance *inst)
+{
+ if (NULL == inst)
+ {
+ return false;
+ }
+
+ return inst->secure;
+}
+
+void mve_fw_log_fw_binary(struct mve_fw_instance *inst, struct mve_session *session)
+{
+ struct fw_header *header;
+ struct
+ {
+ struct mve_log_header header;
+ struct mve_log_fw_binary fw_binary;
+ }
+ message;
+ struct iovec vec[2];
+ struct timespec timespec;
+
+ if (false != inst->secure)
+ {
+ return;
+ }
+
+ /* Map first page containing the firmware binary header. */
+ header = (struct fw_header *)mve_rsrc_mem_cpu_map_page(inst->desc->data[0]);
+ if (NULL == header)
+ {
+ return;
+ }
+
+ getnstimeofday(&timespec);
+
+ message.header.magic = MVE_LOG_MAGIC;
+ message.header.length = 0;
+ message.header.type = MVE_LOG_TYPE_FW_BINARY;
+ message.header.severity = MVE_LOG_INFO;
+ message.header.timestamp.sec = timespec.tv_sec;
+ message.header.timestamp.nsec = timespec.tv_nsec;
+
+ message.fw_binary.length = sizeof(*header);
+ message.fw_binary.session = (uintptr_t)session;
+
+ vec[0].iov_base = &message;
+ vec[0].iov_len = sizeof(message);
+
+ vec[1].iov_base = header;
+ vec[1].iov_len = sizeof(*header);
+
+ message.header.length = sizeof(message.fw_binary) + sizeof(*header);
+
+ MVE_LOG_DATA(&mve_rsrc_log_fwif, MVE_LOG_WARNING, vec, 2);
+
+ /* Unmap first page. */
+ mve_rsrc_mem_cpu_unmap_page(inst->desc->data[0]);
+}
+
+#ifdef UNIT
+
+void mve_fw_debug_get_info(struct mve_fw_instance *inst,
+ mve_mmu_entry_t **mmu_entries,
+ uint32_t *no_text_pages,
+ uint32_t *no_shared_pages,
+ uint32_t *no_bss_pages)
+{
+ struct mve_firmware_descriptor *desc;
+ int sem_taken;
+
+ if (NULL == inst)
+ {
+ return;
+ }
+
+ sem_taken = down_interruptible(&fw_cache_mutex);
+ if (0 != sem_taken)
+ {
+ return;
+ }
+
+ desc = get_fw_descriptor(inst->role);
+ if (NULL == desc)
+ {
+ *mmu_entries = NULL;
+ *no_text_pages = 0;
+ *no_shared_pages = 0;
+ *no_bss_pages = 0;
+
+ up(&fw_cache_mutex);
+ return;
+ }
+
+ *mmu_entries = desc->mmu_entries;
+ *no_text_pages = desc->num_text_pages;
+ *no_shared_pages = desc->num_shared_pages;
+ *no_bss_pages = desc->num_bss_pages;
+
+ kref_put(&desc->refcount, release_fw_descriptor);
+ up(&fw_cache_mutex);
+}
+
+#endif
diff --git a/drivers/video/arm/v5xx/base/mve_fw.h b/drivers/video/arm/v5xx/base/mve_fw.h
new file mode 100644
index 000000000000..27729e3e3497
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_fw.h
@@ -0,0 +1,116 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_FW_H
+#define MVE_FW_H
+
+#include "mve_mmu.h"
+
+struct mve_fw_instance;
+struct mve_session;
+
+#define FW_PAGETYPE_INVALID 0 /* no page */
+#define FW_PAGETYPE_TEXT 1 /* TEXT page, shared between sessions. Page is allocated */
+#define FW_PAGETYPE_BSS 2 /* BSS page, separate per core in same session */
+#define FW_PAGETYPE_BSS_SHARED 3 /* BSS page, shared across cores in same session */
+
+#define FW_PHYSADDR_MASK 0xfffffffc
+#define FW_PHYSADDR_SHIFT 2
+#define FW_PAGETYPE_MASK 0x3
+#define FW_PAGETYPE_SHIFT 0
+
+/* Get page type from FW MMU page entry */
+#define FW_GET_TYPE(line) ((line) & FW_PAGETYPE_MASK)
+/* Get page address from FW MMU page entry */
+#define FW_GET_PHADDR(line) ((((phys_addr_t)line) & FW_PHYSADDR_MASK) << (MVE_MMU_PAGE_SHIFT - FW_PHYSADDR_SHIFT))
+
+/* These constants are used to identify the firmware host interface protocol version */
+enum mve_fw_protocol_version
+{
+ MVE_FW_PROTOCOL_VERSION_1_0 = 0, /**< Protocol version v1 (Mali-V550 EAC) */
+ MVE_FW_PROTOCOL_VERSION_2_0 = 1, /**< Protocol version v2 */
+ MVE_FW_PROTOCOL_VERSION_UNKNOWN = 255 /**< Signals unknown protocol version or error reading
+ * protocol version */
+};
+
+/**
+ * Initializes the firmware cache subsystem. This function must be called
+ * before any firmware binaries can be loaded.
+ */
+void mve_fw_init(void);
+
+/**
+ * Loads the firmware necessary to support the supplied OMX role. Maps the firmware into
+ * the correct regions of the MMU table.
+ * @param ctx The MMU context.
+ * @param fw_secure_desc Pre-created l2 pagetables for secure video playback
+ * @param role The OMX role the firmware must support.
+ * @param ncores Number of firmware instances that shall be mapped into the MVE address space.
+ * @return Firmware instance on success, NULL on failure.
+ */
+struct mve_fw_instance *mve_fw_load(struct mve_mmu_ctx *ctx,
+ struct mve_base_fw_secure_descriptor *fw_secure_desc,
+ const char *role,
+ int ncores);
+
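+/* Illustrative usage, not from the source: a session pairs the load with an
+ * unload on teardown, e.g.
+ *
+ *     struct mve_fw_instance *fw;
+ *
+ *     fw = mve_fw_load(ctx, NULL, "video_decoder.avc", ncores);
+ *     if (NULL != fw)
+ *     {
+ *         // ... run the session ...
+ *         mve_fw_unload(ctx, fw);
+ *     }
+ *
+ * Passing a non-NULL fw_secure_desc instead selects the secure path, where
+ * the L2 page tables have been pre-created by the secure OS.
+ */
+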
+/**
+ * Unload firmware, unmap the firmware pages from the MMU tables and free
+ * all unused resources. The firmware is removed from the cache if no other
+ * running session uses this firmware.
+ * @param ctx The MMU context.
+ * @param inst The firmware instance to unload.
+ * @return True on success, false if an error occurred and no firmware was unloaded.
+ */
+bool mve_fw_unload(struct mve_mmu_ctx *ctx, struct mve_fw_instance *inst);
+
+/**
+ * Returns the host interface protocol version expected by the loaded firmware instance.
+ * @param inst The firmware instance.
+ * @return Pointer to a mve_fw_version structure data.
+ */
+struct mve_base_fw_version *mve_fw_get_version(struct mve_fw_instance *inst);
+
+/**
+ * Returns secure attribute of the loaded firmware instance.
+ * @param inst The firmware instance.
+ * @return true if secure firmware loaded, false otherwise.
+ */
+bool mve_fw_secure(struct mve_fw_instance *inst);
+
+/**
+ * Log the firmware binary header.
+ * @param inst The firmware instance.
+ * @param session Pointer to session.
+ */
+void mve_fw_log_fw_binary(struct mve_fw_instance *inst, struct mve_session *session);
+
+#ifdef UNIT
+
+/**
+ * Returns FW page setup data. Only used by the unit tests to verify FW loading.
+ * @param inst The FW instance that data should be queried from.
+ * @param mmu_entries [out] List of text pages and markers for shared and non-shared BSS pages.
+ * @param no_text_pages [out] Number of text pages.
+ * @param no_shared_pages [out] Number of shared BSS pages.
+ * @param no_bss_pages [out] Number of non-shared BSS pages.
+ */
+void mve_fw_debug_get_info(struct mve_fw_instance *inst,
+ mve_mmu_entry_t **mmu_entries,
+ uint32_t *no_text_pages,
+ uint32_t *no_shared_pages,
+ uint32_t *no_bss_pages);
+
+#endif
+
+#endif /* MVE_FW_H */
diff --git a/drivers/video/arm/v5xx/base/mve_ioctl.h b/drivers/video/arm/v5xx/base/mve_ioctl.h
new file mode 100644
index 000000000000..af5faab29c9f
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_ioctl.h
@@ -0,0 +1,26 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_IOCTL_H
+#define MVE_IOCTL_H
+
+#ifndef EMULATOR
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#endif
+
+typedef uint32_t session_id;
+
+#endif /* MVE_IOCTL_H */
diff --git a/drivers/video/arm/v5xx/base/mve_mem_region.c b/drivers/video/arm/v5xx/base/mve_mem_region.c
new file mode 100644
index 000000000000..4530f38809f3
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_mem_region.c
@@ -0,0 +1,134 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <asm/bug.h>
+#endif
+
+#include "mve_mem_region.h"
+#include "mve_fw.h"
+
+static struct mve_mem_virt_region mem_regions_prot_v1[] =
+{
+ { /* VIRT_MEM_REGION_FIRMWARE0 */
+ 0x00000000,
+ 0x10000000
+ },
+ { /* VIRT_MEM_REGION_MSG_IN_QUEUE */
+ 0x10079000,
+ 0x1007A000
+ },
+ { /* VIRT_MEM_REGION_MSG_OUT_QUEUE */
+ 0x1007A000,
+ 0x1007B000
+ },
+ { /* VIRT_MEM_REGION_INPUT_BUFFER_IN */
+ 0x1007B000,
+ 0x1007C000
+ },
+ { /* VIRT_MEM_REGION_INPUT_BUFFER_OUT */
+ 0x1007C000,
+ 0x1007D000
+ },
+ { /* VIRT_MEM_REGION_OUTPUT_BUFFER_IN */
+ 0x1007D000,
+ 0x1007E000
+ },
+ { /* VIRT_MEM_REGION_OUTPUT_BUFFER_OUT */
+ 0x1007E000,
+ 0x1007F000
+ },
+ { /* VIRT_MEM_REGION_RPC_QUEUE */
+ 0x1007F000,
+ 0x10080000
+ },
+ { /* VIRT_MEM_REGION_PROTECTED */
+ 0x20000000,
+ 0x50000000,
+ },
+ { /* VIRT_MEM_REGION_OUT_BUF */
+ 0x50000000,
+ 0x80000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE1 */
+ 0x80000000,
+ 0x90000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE2 */
+ 0x90000000,
+ 0xA0000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE3 */
+ 0xA0000000,
+ 0xB0000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE4 */
+ 0xB0000000,
+ 0xC0000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE5 */
+ 0xC0000000,
+ 0xD0000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE6 */
+ 0xD0000000,
+ 0xE0000000,
+ },
+ { /* VIRT_MEM_REGION_FIRMWARE7 */
+ 0xE0000000,
+ 0xF0000000,
+ },
+ { /* VIRT_MEM_REGION_REGS */
+ 0xF0000000,
+ 0xFFFFFFFF,
+ }
+};
+
+void mve_mem_virt_region_get(int fw_prot,
+ enum mve_mem_virt_region_type type,
+ struct mve_mem_virt_region *region)
+{
+ if (NULL != region)
+ {
+ /* Apparently gcc implements the enum as an unsigned type and dislikes
+ * what it considers to be an always-true comparison */
+ if (/*VIRT_MEM_REGION_FIRMWARE0 <= type &&*/ VIRT_MEM_REGION_COUNT > type)
+ {
+ *region = mem_regions_prot_v1[type];
+ }
+ else
+ {
+ WARN_ON(true);
+ region->start = 0xFFFFFFFF;
+ region->end = 0xFFFFFFFF;
+ }
+ }
+}
+
+enum mve_mem_virt_region_type mve_mem_virt_region_type_get(int fw_prot,
+ mve_addr_t mve_addr)
+{
+ enum mve_mem_virt_region_type i;
+ for (i = VIRT_MEM_REGION_FIRST; i < VIRT_MEM_REGION_COUNT; i++)
+ {
+ if (mve_addr >= mem_regions_prot_v1[i].start &&
+ mve_addr < mem_regions_prot_v1[i].end)
+ {
+ return i;
+ }
+ }
+ return VIRT_MEM_REGION_COUNT;
+}
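+
+/* Illustrative sketch (not part of the driver): a round trip through the
+ * two queries above, using the protected region from mem_regions_prot_v1.
+ *
+ *   struct mve_mem_virt_region region;
+ *
+ *   mve_mem_virt_region_get(fw_prot, VIRT_MEM_REGION_PROTECTED, &region);
+ *   region.start == 0x20000000, region.end == 0x50000000
+ *
+ *   mve_mem_virt_region_type_get(fw_prot, region.start)
+ *   returns VIRT_MEM_REGION_PROTECTED
+ */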
diff --git a/drivers/video/arm/v5xx/base/mve_mem_region.h b/drivers/video/arm/v5xx/base/mve_mem_region.h
new file mode 100644
index 000000000000..1eb878f94d6a
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_mem_region.h
@@ -0,0 +1,86 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_MEM_REGION_H
+#define MVE_MEM_REGION_H
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+typedef uint32_t mve_addr_t;
+
+/**
+ * Struct defining an MVE virtual memory region.
+ */
+struct mve_mem_virt_region
+{
+ mve_addr_t start; /**< Virtual start address of the region */
+ mve_addr_t end; /**< Virtual end address of the region */
+};
+
+/**
+ * This enum defines the different MVE virtual memory regions.
+ */
+enum mve_mem_virt_region_type
+{
+ VIRT_MEM_REGION_FIRST, /**< Region start; not an actual memory region */
+ VIRT_MEM_REGION_FIRMWARE0 = VIRT_MEM_REGION_FIRST,
+
+ VIRT_MEM_REGION_MSG_IN_QUEUE,
+ VIRT_MEM_REGION_MSG_OUT_QUEUE,
+ VIRT_MEM_REGION_INPUT_BUFFER_IN,
+ VIRT_MEM_REGION_INPUT_BUFFER_OUT,
+ VIRT_MEM_REGION_OUTPUT_BUFFER_IN,
+ VIRT_MEM_REGION_OUTPUT_BUFFER_OUT,
+ VIRT_MEM_REGION_RPC_QUEUE,
+
+ VIRT_MEM_REGION_PROTECTED,
+ VIRT_MEM_REGION_OUT_BUF,
+ VIRT_MEM_REGION_FIRMWARE1,
+ VIRT_MEM_REGION_FIRMWARE2,
+ VIRT_MEM_REGION_FIRMWARE3,
+ VIRT_MEM_REGION_FIRMWARE4,
+ VIRT_MEM_REGION_FIRMWARE5,
+ VIRT_MEM_REGION_FIRMWARE6,
+ VIRT_MEM_REGION_FIRMWARE7,
+ VIRT_MEM_REGION_REGS,
+ VIRT_MEM_REGION_COUNT /**< Region count; not an actual memory region */
+};
+
+/**
+ * This function is used to query the memory region for a certain region type.
+ * @param fw_prot FW protocol version
+ * @param type The memory region type.
+ * @param[out] region Pointer to the structure that will contain the memory
+ * region information after the call has returned. If the
+ * memory region doesn't exist, the start and end of the
+ * region is set to 0xFFFFFFFF.
+ */
+void mve_mem_virt_region_get(int fw_prot,
+ enum mve_mem_virt_region_type type,
+ struct mve_mem_virt_region *region);
+
+/**
+ * This function is used to query the region type for a certain mve address.
+ * @param fw_prot FW protocol version
+ * @param mve_addr The mve address to check.
+ * @return The memory region type. If mve_addr is not a valid address,
+ * VIRT_MEM_REGION_COUNT is returned.
+ */
+enum mve_mem_virt_region_type mve_mem_virt_region_type_get(int fw_prot,
+ mve_addr_t mve_addr);
+#endif /* MVE_MEM_REGION_H */
diff --git a/drivers/video/arm/v5xx/base/mve_mmu.c b/drivers/video/arm/v5xx/base/mve_mmu.c
new file mode 100644
index 000000000000..79cb170bd582
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_mmu.c
@@ -0,0 +1,1260 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "mveemul.h"
+#include "emulator_userspace.h"
+#else
+#include <linux/slab.h>
+#endif
+
+#include "mve_mmu.h"
+#include "mve_buffer.h"
+#include "mve_rsrc_log.h"
+
+#define NUM_L1_ENTRIES 1024
+#define NUM_L2_ENTRIES 1024
+
+#ifndef CSTD_UNUSED
+#define CSTD_UNUSED(x) ((void)(x))
+#endif
+
+#ifdef _BullseyeCoverage
+ #define BullseyeCoverageOff \
+ # pragma BullseyeCoverage off
+ #define BullseyeCoverageOn \
+ # pragma BullseyeCoverage on
+#else
+ #define BullseyeCoverageOff
+ #define BullseyeCoverageOn
+#endif
+
+enum mapping_type
+{
+ /** Normal page mapping. Do not free pages on unmap or when the MMU context
+ * is destroyed */
+ NORMAL_ALLOCATION = 0,
+ /** Used for external memory mapping, e.g. user-space memory. */
+ EXTERNAL_MAPPING = 1,
+ /** Free memory pages when the region is unmapped. */
+ FREE_ON_UNMAP = 2,
+};
+
+/** @brief Describes a mapping into the MVE MMU */
+struct mve_mmu_mapping
+{
+ mve_addr_t mve_addr; /**< MVE virtual address */
+ uint32_t num_pages; /**< Number of allocated pages */
+ uint32_t max_pages; /**< Size of the region reserved for this allocation */
+ uint32_t type; /**< Type of allocation */
+ phys_addr_t *pages; /**< Member is set if pages are to be deallocated upon unmap */
+
+ struct list_head list; /**< These entries are stored in a linked list */
+};
+
+/**
+ * Returns the physical address of the L2 line that corresponds to the
+ * supplied address in MVE address space. This function allocates an L2 page
+ * if one doesn't exist.
+ * @param ctx The MMU context.
+ * @param mve_addr Address in MVE address space.
+ * @return The physical address of the L2 line that corresponds to the supplied address.
+ * Returns 0 in case of an error.
+ */
+static phys_addr_t get_l2_line_addr(struct mve_mmu_ctx *ctx, mve_addr_t mve_addr)
+{
+ phys_addr_t l1_entry_addr = mve_mmu_l1_entry_addr_from_mve_addr(ctx->l1_page, mve_addr);
+ mve_mmu_entry_t l1_entry = (mve_mmu_entry_t)mve_rsrc_mem_read32(l1_entry_addr);
+ phys_addr_t l2_page_addr;
+ phys_addr_t l2_entry_addr;
+
+ if (0 == l1_entry)
+ {
+ /* No L2 page has been created for this memory chunk */
+ uint32_t l1_index;
+ phys_addr_t l2_page = MVE_RSRC_MEM_ALLOC_PAGE();
+ if (0 == l2_page)
+ {
+ return 0;
+ }
+ l1_index = mve_mmu_l1_entry_index_from_mve_addr(ctx->l1_page, mve_addr);
+
+ ctx->l1_l2_alloc[l1_index] = ALLOC_NORMAL;
+ l1_entry = mve_mmu_make_l1l2_entry(ATTRIB_PRIVATE, l2_page, ACCESS_READ_ONLY);
+
+ mve_rsrc_mem_write32(l1_entry_addr, l1_entry);
+ mve_rsrc_mem_clean_cache_range(l1_entry_addr, sizeof(mve_mmu_entry_t));
+ }
+
+ l2_page_addr = mve_mmu_entry_get_paddr(l1_entry);
+ l2_entry_addr = mve_mmu_l2_entry_addr_from_mve_addr(l2_page_addr, mve_addr);
+
+ return l2_entry_addr;
+}
+
+struct mve_mmu_ctx *mve_mmu_create_ctx(void)
+{
+ struct mve_mmu_ctx *ctx;
+
+ ctx = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_mmu_ctx), GFP_KERNEL);
+ if (NULL == ctx)
+ {
+ return NULL;
+ }
+
+ ctx->l1_page = MVE_RSRC_MEM_ALLOC_PAGE();
+ if (0 == ctx->l1_page)
+ {
+ goto error;
+ }
+
+ ctx->l1_l2_alloc = MVE_RSRC_MEM_VALLOC(sizeof(enum mve_mmu_alloc) * NUM_L1_ENTRIES);
+ if (NULL == ctx->l1_l2_alloc)
+ {
+ goto error;
+ }
+ memset(ctx->l1_l2_alloc, ALLOC_NORMAL, sizeof(enum mve_mmu_alloc) * NUM_L1_ENTRIES);
+
+ /* This page will be used to mark reserved entries of the L2 pages. */
+ ctx->reservation = MVE_RSRC_MEM_ALLOC_PAGE();
+ if (0 == ctx->reservation)
+ {
+ goto error;
+ }
+
+ ctx->mappings = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_mmu_mapping), GFP_KERNEL);
+ if (0 == ctx->mappings)
+ {
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&ctx->mappings->list);
+
+#if defined EMULATOR
+ {
+ int ret;
+
+ ctx->mmu_id = ctx->l1_page;
+ ret = mveemul_allocate_memory_map((unsigned int *)&ctx->mmu_id);
+ if (MVEEMUL_RET_OK != ret)
+ {
+ goto error;
+ }
+ }
+#endif
+
+ return ctx;
+
+error:
+ if (0 != ctx->mappings)
+ {
+ MVE_RSRC_MEM_CACHE_FREE(ctx->mappings, sizeof(struct mve_mmu_mapping));
+ }
+ if (0 != ctx->reservation)
+ {
+ MVE_RSRC_MEM_FREE_PAGE(ctx->reservation);
+ }
+ if (0 != ctx->l1_l2_alloc)
+ {
+ MVE_RSRC_MEM_VFREE(ctx->l1_l2_alloc);
+ }
+ if (0 != ctx->l1_page)
+ {
+ MVE_RSRC_MEM_FREE_PAGE(ctx->l1_page);
+ }
+ if (NULL != ctx)
+ {
+ MVE_RSRC_MEM_CACHE_FREE(ctx, sizeof(struct mve_mmu_ctx));
+ }
+
+ return NULL;
+}
+
+#if defined EMULATOR
+BullseyeCoverageOff
+/**
+ * Map a set of pages into the emulator's memory space.
+ * @param ctx The MMU context.
+ * @param mve_addr The start address of the mapping in the emulator's memory space.
+ * @param pages An array of physical pages.
+ * @param num_pages Number of pages to map.
+ */
+static void emul_map_memory(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr,
+ phys_addr_t *pages,
+ uint32_t num_pages)
+{
+ /* Go through the MMU table within the region defined by
+ * [mve_addr:mve_addr + num_pages * MVE_MMU_PAGE_SIZE] and
+ * register each consecutive memory chunk with mveemul. */
+ mve_addr_t start, end, curr;
+
+ start = mve_addr;
+ curr = start;
+ end = mve_addr + num_pages * MVE_MMU_PAGE_SIZE;
+
+ CSTD_UNUSED(pages);
+
+ while (curr < end)
+ {
+ phys_addr_t l2_line;
+ mve_mmu_entry_t entry;
+
+ /* Skip all initially empty pages */
+ do
+ {
+ l2_line = get_l2_line_addr(ctx, curr);
+ if (0 == l2_line)
+ {
+ /* Couldn't allocate an L2 page. This is an error! */
+ return;
+ }
+ entry = mve_rsrc_mem_read32(l2_line);
+ curr += MVE_MMU_PAGE_SIZE;
+ }
+ while (curr - MVE_MMU_PAGE_SIZE < end && 0 == entry);
+ start = curr - MVE_MMU_PAGE_SIZE;
+
+ /* Find the last page in the region or the first empty page */
+ do
+ {
+ l2_line = get_l2_line_addr(ctx, curr);
+ if (0 == l2_line)
+ {
+ /* Couldn't allocate an L2 page. This is an error! */
+ return;
+ }
+ entry = mve_rsrc_mem_read32(l2_line);
+ curr += MVE_MMU_PAGE_SIZE;
+ }
+ while (curr - MVE_MMU_PAGE_SIZE < end && 0 != entry);
+ curr -= MVE_MMU_PAGE_SIZE;
+
+ /* Add the chunk [start:curr] to mveemul */
+ if (0 < curr - start)
+ {
+ while (start < curr)
+ {
+ mve_addr_t ptr = start + MVE_MMU_PAGE_SIZE;
+ uint32_t i = 1;
+ phys_addr_t addr;
+
+ l2_line = get_l2_line_addr(ctx, start);
+ if (0 == l2_line)
+ {
+ return;
+ }
+ addr = mve_mmu_entry_get_paddr(mve_rsrc_mem_read32(l2_line));
+ void *pstart = mve_rsrc_mem_cpu_map_page(addr);
+ void *pptr;
+
+ do
+ {
+ l2_line = get_l2_line_addr(ctx, ptr);
+ if (0 == l2_line)
+ {
+ return;
+ }
+ mve_mmu_entry_t entry = mve_rsrc_mem_read32(l2_line);
+ pptr = mve_rsrc_mem_cpu_map_page(mve_mmu_entry_get_paddr(entry));
+ /* Unmap entry immediately. We only want to know if the pages are linear
+ * in the virtual address space. */
+ mve_rsrc_mem_cpu_unmap_page(mve_mmu_entry_get_paddr(entry));
+
+ i++;
+ ptr += MVE_MMU_PAGE_SIZE;
+ }
+ while (pstart + (i - 1) * MVE_MMU_PAGE_SIZE == pptr && (ptr - MVE_MMU_PAGE_SIZE) < curr);
+ /* Unmap pstart */
+ mve_rsrc_mem_cpu_unmap_page(addr);
+
+ ptr -= MVE_MMU_PAGE_SIZE;
+
+ l2_line = get_l2_line_addr(ctx, start);
+ if (0 == l2_line)
+ {
+ return;
+ }
+ addr = mve_mmu_entry_get_paddr(mve_rsrc_mem_read32(l2_line));
+ int err = mveemul_add_memory(ctx->mmu_id,
+ mve_rsrc_mem_cpu_map_page(addr),
+ start, ptr - start, ptr - start, false);
+ if (err != MVEEMUL_RET_OK)
+ {
+ /* Log an error if mveemul_add_memory fails, as this
+ * indicates that the emulator is in a bad state. */
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "mveemul_add_memory failed. error=%u.", err);
+ }
+ mve_rsrc_mem_cpu_unmap_page(addr);
+ start = ptr;
+ }
+ }
+ }
+}
+
+/**
+ * Unmap a set of pages from the emulator's memory space.
+ * @param ctx The MMU context.
+ * @param entry Structure containing information about the pages to unmap.
+ */
+static void emul_unmap_memory(struct mve_mmu_ctx *ctx,
+ struct mve_mmu_mapping *entry)
+{
+ /* Go through the MMU table within the region defined by
+ * [mve_addr:mve_addr + entry->num_pages * MVE_MMU_PAGE_SIZE] and
+ * remove each consecutive memory chunk from mveemul. */
+ mve_addr_t start, end, curr;
+
+ start = entry->mve_addr;
+ curr = start;
+ end = entry->mve_addr + entry->num_pages * MVE_MMU_PAGE_SIZE;
+
+ while (curr < end)
+ {
+ mve_mmu_entry_t l2_entry;
+
+ /* Skip all initial empty pages */
+ do
+ {
+ phys_addr_t l2_line = get_l2_line_addr(ctx, curr);
+ if (0 == l2_line)
+ {
+ return;
+ }
+ l2_entry = mve_rsrc_mem_read32(l2_line);
+ curr += MVE_MMU_PAGE_SIZE;
+ }
+ while (curr - MVE_MMU_PAGE_SIZE < end && 0 == l2_entry);
+ start = curr - MVE_MMU_PAGE_SIZE;
+
+ /* Find the last page in the region or the first empty page */
+ do
+ {
+ phys_addr_t l2_line = get_l2_line_addr(ctx, curr);
+ if (0 == l2_line)
+ {
+ return;
+ }
+ l2_entry = mve_rsrc_mem_read32(l2_line);
+ curr += MVE_MMU_PAGE_SIZE;
+ }
+ while (curr - MVE_MMU_PAGE_SIZE < end && 0 != l2_entry);
+ curr -= MVE_MMU_PAGE_SIZE;
+
+ /* Remove the chunk [start:curr] from mveemul */
+ if (0 < curr - start)
+ {
+ int err = mveemul_remove_memory(ctx->mmu_id, start);
+ if (err != MVEEMUL_RET_OK)
+ {
+ /* Log an error if mveemul_remove_memory fails, as this
+ * indicates that the emulator is in a bad state. */
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "mveemul_remove_memory failed. error=%u.", err);
+ }
+ }
+ }
+}
+BullseyeCoverageOn
+#endif /* #if defined EMULATOR */
+
+/**
+ * Register a page mapping operation with the MMU context. This function saves
+ * all information needed to manage the mapped pages, e.g. to later be able
+ * to unmap them. This function also registers the mapping with the emulator.
+ * @param ctx The MMU context.
+ * @param mve_addr Start address of the location in MVE address space where the pages are mapped.
+ * @param pages Array of pages to map.
+ * @param num_pages Number of pages to map.
+ * @param max_pages Maximum number of pages the mapping may later be resized to.
+ * @param type Type of mapping.
+ * @return True on success, false otherwise.
+ */
+static bool add_mapping(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr,
+ phys_addr_t *pages,
+ uint32_t num_pages,
+ uint32_t max_pages,
+ enum mapping_type type)
+{
+ struct mve_mmu_mapping *map;
+
+ map = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_mmu_mapping), GFP_KERNEL);
+ if (NULL == map)
+ {
+ return false;
+ }
+
+ map->mve_addr = mve_addr;
+ map->pages = pages;
+ map->num_pages = num_pages;
+ map->max_pages = max_pages;
+ map->type = type;
+ INIT_LIST_HEAD(&map->list);
+
+ list_add(&map->list, &ctx->mappings->list);
+
+#ifdef EMULATOR
+ {
+ emul_map_memory(ctx, mve_addr, pages, num_pages);
+ }
+#endif
+
+ return true;
+}
+
+/**
+ * Remove a mapping from the bookkeeping structure. Also frees the pages if
+ * the mapping is of type FREE_ON_UNMAP.
+ * @param ctx The MMU context.
+ * @param entry Structure containing information about the pages to unmap.
+ */
+static void free_mapping_entry(struct mve_mmu_ctx *ctx, struct mve_mmu_mapping *entry)
+{
+ list_del(&entry->list);
+
+#ifdef EMULATOR
+ {
+ emul_unmap_memory(ctx, entry);
+ }
+#endif
+
+ if (FREE_ON_UNMAP == entry->type)
+ {
+ MVE_RSRC_MEM_FREE_PAGES(entry->pages, entry->num_pages);
+ entry->pages = NULL;
+ }
+ MVE_RSRC_MEM_CACHE_FREE(entry, sizeof(struct mve_mmu_mapping));
+}
+
+/**
+ * Remove a mapping from the bookkeeping structure. This function verifies
+ * the existence of a page mapping at the supplied address and calls free_mapping_entry
+ * to do the actual work.
+ * @param ctx The MMU context.
+ * @param mve_addr Start address of the page mapping to unmap.
+ * @return True on success, false otherwise.
+ */
+static bool remove_mapping(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr)
+{
+ struct list_head *pos;
+ struct mve_mmu_mapping *map;
+
+ list_for_each(pos, &(ctx->mappings->list))
+ {
+ map = container_of(pos, struct mve_mmu_mapping, list);
+
+ if (mve_addr == map->mve_addr)
+ {
+ free_mapping_entry(ctx, map);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Get the bookkeeping entry for the page mapping starting at the supplied address.
+ * @param ctx The MMU context.
+ * @param mve_addr Start address of the page mapping.
+ * @return The bookkeeping entry for the page mapping if one exists, NULL
+ * otherwise.
+ */
+static struct mve_mmu_mapping *get_mapping(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr)
+{
+ struct list_head *pos;
+ struct mve_mmu_mapping *map;
+
+ list_for_each(pos, &(ctx->mappings->list))
+ {
+ map = container_of(pos, struct mve_mmu_mapping, list);
+
+ if (mve_addr == map->mve_addr)
+ {
+ return map;
+ }
+ }
+
+ return NULL;
+}
+
+void mve_mmu_destroy_ctx(struct mve_mmu_ctx *ctx)
+{
+ int i;
+ struct list_head *pos, *next;
+
+ if (NULL == ctx)
+ {
+ return;
+ }
+
+ /* Clean up the bookkeeping structure */
+ list_for_each_safe(pos, next, &(ctx->mappings->list))
+ {
+ struct mve_mmu_mapping *map = container_of(pos, struct mve_mmu_mapping, list);
+
+#ifdef UNIT
+ BullseyeCoverageOff
+ if (NULL == map->pages && 0 < map->num_pages)
+ {
+ /* Only warn for regions that are not supposed to be deallocated
+ * on unmap. */
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "MMU leaked region. addr=0x%X-0x%X.",
+ map->mve_addr,
+ map->mve_addr + map->num_pages * MVE_MMU_PAGE_SIZE);
+ }
+ BullseyeCoverageOn
+#endif
+
+ free_mapping_entry(ctx, map);
+ }
+
+ /* Free all MMU pages and unallocated memory */
+ for (i = 0; i < NUM_L1_ENTRIES; ++i)
+ {
+ mve_mmu_entry_t l1_entry;
+ /* Check if the L2 page corresponding to this L1 entry is externally allocated */
+ if (ctx->l1_l2_alloc[i] == ALLOC_EXTERNAL)
+ {
+ continue;
+ }
+
+ l1_entry = mve_rsrc_mem_read32(ctx->l1_page + i * sizeof(mve_mmu_entry_t));
+ if (0 != l1_entry)
+ {
+ phys_addr_t l2_page = mve_mmu_entry_get_paddr(l1_entry);
+ /* Free L2 page */
+ MVE_RSRC_MEM_FREE_PAGE(l2_page);
+ }
+ }
+
+#ifdef EMULATOR
+ {
+ int ret;
+
+ ret = mveemul_remove_memory_map(ctx->mmu_id);
+ if (MVEEMUL_RET_OK != ret)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to remove memory map.");
+ }
+ }
+#endif
+
+ MVE_RSRC_MEM_CACHE_FREE(ctx->mappings, sizeof(struct mve_mmu_mapping));
+
+ MVE_RSRC_MEM_FREE_PAGE(ctx->reservation);
+ MVE_RSRC_MEM_FREE_PAGE(ctx->l1_page);
+ MVE_RSRC_MEM_VFREE(ctx->l1_l2_alloc);
+ MVE_RSRC_MEM_CACHE_FREE(ctx, sizeof(struct mve_mmu_ctx));
+}
+
+bool mve_mmu_map_page_replace(struct mve_mmu_ctx *ctx,
+ phys_addr_t l2_page,
+ mve_addr_t mve_addr)
+{
+ phys_addr_t l1_entry_addr;
+ mve_mmu_entry_t l1_entry;
+ uint32_t l1_index;
+
+ /* Sanity check of the arguments */
+ if (NULL == ctx || 0 == l2_page)
+ {
+ return false;
+ }
+
+ l1_entry_addr = mve_mmu_l1_entry_addr_from_mve_addr(ctx->l1_page, mve_addr);
+ l1_entry = mve_mmu_make_l1l2_entry(ATTRIB_PRIVATE, l2_page, ACCESS_READ_ONLY);
+ l1_index = mve_mmu_l1_entry_index_from_mve_addr(ctx->l1_page, mve_addr);
+
+ ctx->l1_l2_alloc[l1_index] = ALLOC_EXTERNAL;
+ mve_rsrc_mem_write32(l1_entry_addr, l1_entry);
+ mve_rsrc_mem_clean_cache_range(l1_entry_addr, sizeof(mve_mmu_entry_t));
+
+ (void)add_mapping(ctx, mve_addr, NULL, 1, 1, NORMAL_ALLOCATION);
+
+ return true;
+}
+
+bool mve_mmu_map_pages_merge(struct mve_mmu_ctx *ctx,
+ phys_addr_t l2_page,
+ mve_addr_t mve_addr,
+ uint32_t num_pages)
+{
+ mve_addr_t start_addr, curr_addr, end_addr;
+ unsigned int i;
+ uint32_t offset;
+
+ /* Sanity check of the arguments */
+ if (NULL == ctx || 0 == l2_page || 0 == num_pages || NUM_L2_ENTRIES < num_pages)
+ {
+ return false;
+ }
+
+ offset = mve_mmu_get_offset_in_page(mve_addr);
+ if (0 != offset)
+ {
+ /* Not page aligned */
+ return false;
+ }
+
+ if (mve_addr + num_pages * MVE_MMU_PAGE_SIZE < mve_addr)
+ {
+ /* Trying to map past 0xFFFFFFFF */
+ return false;
+ }
+
+ start_addr = mve_addr;
+ end_addr = start_addr + num_pages * MVE_MMU_PAGE_SIZE;
+
+ /* Make sure there is room at the designated place. */
+ for (curr_addr = start_addr; curr_addr < end_addr; curr_addr += MVE_MMU_PAGE_SIZE)
+ {
+ phys_addr_t addr;
+ mve_mmu_entry_t l2_entry;
+
+ addr = get_l2_line_addr(ctx, curr_addr);
+ if (0 == addr)
+ {
+ return false;
+ }
+
+ l2_entry = mve_rsrc_mem_read32(addr);
+ if (0 != l2_entry)
+ {
+ /* Memory region is not free! */
+ return false;
+ }
+ }
+
+ /* Setup the MMU tables */
+ for (i = 0; i < num_pages; ++i)
+ {
+ mve_mmu_entry_t entry;
+ phys_addr_t addr = get_l2_line_addr(ctx, start_addr + i * MVE_MMU_PAGE_SIZE);
+ /* No need to check addr here since it was verified in the for loop above */
+ entry = mve_rsrc_mem_read32(l2_page + i * sizeof(mve_mmu_entry_t));
+ mve_rsrc_mem_write32(addr, entry);
+ mve_rsrc_mem_clean_cache_range(addr, sizeof(mve_mmu_entry_t));
+ }
+
+ (void)add_mapping(ctx, mve_addr, NULL, num_pages, num_pages, NORMAL_ALLOCATION);
+
+ return true;
+}
+
+bool mve_mmu_map_pages(struct mve_mmu_ctx *ctx,
+ phys_addr_t *pages,
+ mve_addr_t mve_addr,
+ uint32_t num_pages,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool free_pages_on_unmap)
+{
+ unsigned int i;
+ mve_addr_t start_addr, end_addr, curr_addr;
+ uint32_t offset;
+
+ if (NULL == ctx || NULL == pages)
+ {
+ return false;
+ }
+
+ offset = mve_mmu_get_offset_in_page(mve_addr);
+ if (0 != offset)
+ {
+ /* Not page aligned */
+ return false;
+ }
+
+ if ((0xFFFFFFFF - mve_addr) / MVE_MMU_PAGE_SIZE < num_pages)
+ {
+ /* Trying to map past 0xFFFFFFFF */
+ return false;
+ }
+
+ start_addr = mve_addr;
+ end_addr = mve_addr + num_pages * MVE_MMU_PAGE_SIZE;
+
+ /* Make sure there is room at the designated place. */
+ for (curr_addr = start_addr; curr_addr < end_addr; curr_addr += MVE_MMU_PAGE_SIZE)
+ {
+ phys_addr_t addr;
+ mve_mmu_entry_t l2_entry;
+
+ addr = get_l2_line_addr(ctx, curr_addr);
+ if (0 == addr)
+ {
+ /* Failed to allocate L2 page */
+ return false;
+ }
+
+ l2_entry = mve_rsrc_mem_read32(addr);
+ if (0 != l2_entry)
+ {
+ /* Memory region is not free! */
+ return false;
+ }
+ }
+
+ /* Setup the MMU tables */
+ for (i = 0; i < num_pages; ++i)
+ {
+ mve_mmu_entry_t entry = mve_mmu_make_l1l2_entry(attrib, pages[i], access);
+ phys_addr_t addr = get_l2_line_addr(ctx, mve_addr + i * MVE_MMU_PAGE_SIZE);
+ /* No need to check addr here since it was verified in the for loop above */
+ mve_rsrc_mem_write32(addr, entry);
+ mve_rsrc_mem_clean_cache_range(addr, sizeof(mve_mmu_entry_t));
+ }
+
+ if (true == free_pages_on_unmap)
+ {
+ (void)add_mapping(ctx, mve_addr, pages, num_pages, num_pages, FREE_ON_UNMAP);
+ }
+ else
+ {
+ (void)add_mapping(ctx, mve_addr, NULL, num_pages, num_pages, NORMAL_ALLOCATION);
+ }
+
+ return true;
+}
+
+void mve_mmu_unmap_pages(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr)
+{
+ uint32_t i;
+ phys_addr_t addr, start;
+ struct mve_mmu_mapping *map;
+ uint32_t npages_remaining;
+ uint32_t indx;
+ uint32_t max_indx;
+ uint32_t to_clear_current_iteration;
+
+ if (NULL == ctx)
+ {
+ return;
+ }
+
+ /* Find the mapping in the bookkeeping structure. Unaligned unmaps and
+ * other errors will be handled here. */
+ map = get_mapping(ctx, mve_addr);
+ if (NULL == map)
+ {
+ return;
+ }
+
+ npages_remaining = map->max_pages;
+
+ /* Remove the mapping from the bookkeeping structure */
+ (void)remove_mapping(ctx, mve_addr);
+
+ /* Find address of the beginning of the map */
+ do
+ {
+ /* Find the l2-entry for the mve virtual address */
+ addr = get_l2_line_addr(ctx, mve_addr);
+ /* No need to check addr here since get_mapping has verified that this
+ * is a valid mapping. */
+
+ /* This address must exist inside a mve l2page, which is page-aligned.
+ * Each entry is of a known size, so we can get the index.
+ */
+ indx = (uint32_t)((addr & (MVE_MMU_PAGE_SIZE - 1)) / sizeof(mve_mmu_entry_t));
+ /* And this is the max index available in an l2 page */
+ max_indx = MVE_MMU_PAGE_SIZE / sizeof(mve_mmu_entry_t);
+
+ /* Figure out how many we should clear from this l2 */
+ to_clear_current_iteration = (max_indx - indx) < npages_remaining ? max_indx - indx : npages_remaining;
+
+ /* Remember where we start so we can flush the cache later */
+ start = addr;
+
+ /* Clear the entries. */
+ for (i = 0; i < to_clear_current_iteration; ++i)
+ {
+ mve_rsrc_mem_write32(addr, 0);
+ addr += sizeof(mve_mmu_entry_t);
+ }
+ /* Flush the cache */
+ mve_rsrc_mem_clean_cache_range(start, to_clear_current_iteration * sizeof(mve_mmu_entry_t));
+
+ /* Update how many pages that remain to unmap */
+ npages_remaining -= to_clear_current_iteration;
+ /* The next mve_addr to work with */
+ mve_addr += to_clear_current_iteration * MVE_MMU_PAGE_SIZE;
+
+ /* Loop around if not done */
+ }
+ while (npages_remaining > 0);
+}
+
+bool mve_mmu_map_pages_and_reserve(struct mve_mmu_ctx *ctx,
+ phys_addr_t *pages,
+ struct mve_mem_virt_region *region,
+ uint32_t num_pages,
+ uint32_t max_pages,
+ uint32_t alignment,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool free_pages_on_unmap)
+{
+ mve_addr_t start_addr, end_addr, curr_addr;
+ bool space_found;
+ uint32_t i;
+ uint32_t start_offset, end_offset;
+ bool mapping_registered = false;
+ bool ret = true;
+
+ if (NULL == ctx || NULL == region)
+ {
+ return false;
+ }
+
+ if (NULL == pages && 0 != num_pages)
+ {
+ return false;
+ }
+
+ /* Verify that the allocation is sane */
+ if (0 == max_pages || max_pages < num_pages)
+ {
+ return false;
+ }
+
+ /* Verify that the region is valid */
+ if (region->start > region->end || region->end - region->start == 0)
+ {
+ return false;
+ }
+
+ /* Verify that the region is big enough for the allocation */
+ if (region->end - region->start < max_pages * MVE_MMU_PAGE_SIZE)
+ {
+ return false;
+ }
+
+ /* Verify that the region is page aligned */
+ start_offset = mve_mmu_get_offset_in_page(region->start);
+ end_offset = mve_mmu_get_offset_in_page(region->end);
+ if (0 != start_offset ||
+ 0 != end_offset)
+ {
+ return false;
+ }
+
+ if (0 == alignment)
+ {
+ /* No alignment is the same as one byte alignment */
+ alignment = 1;
+ }
+
+ start_addr = region->start;
+ end_addr = region->end;
+
+ curr_addr = (start_addr + alignment - 1) & ~(alignment - 1);
+ /* Attempt to find a free chunk in MVE address space within the supplied region */
+ do
+ {
+ space_found = true;
+ start_addr = curr_addr;
+ for (i = 0; i < max_pages && true == space_found && curr_addr < end_addr; ++i)
+ {
+ phys_addr_t addr = get_l2_line_addr(ctx, curr_addr);
+ if (0 == addr)
+ {
+ /* No L2 page found/allocated => Out of memory! */
+ return false;
+ }
+ else
+ {
+ space_found = mve_rsrc_mem_read32(addr) == 0;
+ }
+
+ curr_addr += MVE_MMU_PAGE_SIZE;
+ }
+
+ if (false == space_found)
+ {
+ /* We didn't find a region of consecutive free MMU entries.
+ * Find the next gap! */
+ curr_addr = (curr_addr + alignment - 1) & ~(alignment - 1);
+
+ while (curr_addr < end_addr)
+ {
+ phys_addr_t addr = get_l2_line_addr(ctx, curr_addr);
+ if (0 != addr)
+ {
+ mve_mmu_entry_t entry = mve_rsrc_mem_read32(addr);
+ if (0 == entry)
+ {
+ break;
+ }
+ }
+
+ curr_addr = (curr_addr + MVE_MMU_PAGE_SIZE + alignment - 1) & ~(alignment - 1);
+ }
+ }
+ }
+ while (false == space_found && curr_addr < end_addr);
+
+ if (false == space_found || i < max_pages)
+ {
+ /* No space found within the memory region. Allocation failed! */
+ return false;
+ }
+
+ /* Region found */
+ for (i = 0; i < max_pages; ++i)
+ {
+ mve_mmu_entry_t entry;
+ phys_addr_t addr = get_l2_line_addr(ctx, start_addr + i * MVE_MMU_PAGE_SIZE);
+ /* No need to check addr since the loop above has verified that there
+ * are L2 pages for every L2 entry needed for this mapping */
+
+ if (i < num_pages)
+ {
+ /* Map this entry to a real page */
+ entry = mve_mmu_make_l1l2_entry(attrib, pages[i], access);
+ }
+ else
+ {
+ /* Map this entry to a reservation page */
+ entry = mve_mmu_make_l1l2_entry(attrib, ctx->reservation, access);
+ }
+
+ mve_rsrc_mem_write32(addr, entry);
+ mve_rsrc_mem_clean_cache_range(addr, sizeof(mve_mmu_entry_t));
+ }
+
+ /* Return the mapped region */
+ region->start = start_addr;
+ region->end = region->start + num_pages * MVE_MMU_PAGE_SIZE;
+ /* Add the mapping to the bookkeeping structure */
+ if (true == free_pages_on_unmap)
+ {
+ mapping_registered = add_mapping(ctx, region->start, pages, num_pages,
+ max_pages, FREE_ON_UNMAP);
+ }
+ else
+ {
+ mapping_registered = add_mapping(ctx, region->start, NULL, num_pages,
+ max_pages, NORMAL_ALLOCATION);
+ }
+
+ if (false == mapping_registered)
+ {
+ /* Failed to register the memory mapping in the bookkeeping data
+ * structure. Unmap the memory and return false. */
+ for (i = 0; i < max_pages; ++i)
+ {
+ phys_addr_t addr = get_l2_line_addr(ctx, region->start + i * MVE_MMU_PAGE_SIZE);
+ /* No need to check addr because the mapping has already been done
+ * successfully and get_l2_line_addr will therefore not fail now */
+ mve_rsrc_mem_write32(addr, 0);
+ mve_rsrc_mem_clean_cache_range(addr, sizeof(mve_mmu_entry_t));
+ }
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+bool mve_mmu_map_info(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr,
+ uint32_t *num_pages,
+ uint32_t *max_pages)
+{
+ struct mve_mmu_mapping *map;
+
+ map = get_mapping(ctx, mve_addr);
+ if (NULL == map)
+ {
+ return false;
+ }
+
+ if (NULL != num_pages)
+ {
+ *num_pages = map->num_pages;
+ }
+ if (NULL != max_pages)
+ {
+ *max_pages = map->max_pages;
+ }
+ return true;
+}
+
+#ifdef EMULATOR
+BullseyeCoverageOff
+bool mve_mmu_map_resize(struct mve_mmu_ctx *ctx,
+ phys_addr_t *new_pages,
+ mve_addr_t mve_addr,
+ uint32_t num_pages,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool extend)
+{
+ struct mve_mmu_mapping *map;
+
+ CSTD_UNUSED(new_pages);
+ CSTD_UNUSED(attrib);
+ CSTD_UNUSED(access);
+ CSTD_UNUSED(extend);
+
+ if (NULL == ctx)
+ {
+ return false;
+ }
+
+ map = get_mapping(ctx, mve_addr);
+ if (NULL == map)
+ {
+ return false;
+ }
+
+ if (num_pages > map->max_pages)
+ {
+ /* Trying to increase mapping past the maximum size */
+ return false;
+ }
+
+ return true;
+}
+BullseyeCoverageOn
+#else
+bool mve_mmu_map_resize(struct mve_mmu_ctx *ctx,
+ phys_addr_t *new_pages,
+ mve_addr_t mve_addr,
+ uint32_t num_pages,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool extend)
+{
+ struct mve_mmu_mapping *map;
+ mve_addr_t curr_addr;
+ uint32_t i;
+
+ if (NULL == ctx ||
+ 0 == mve_addr)
+ {
+ return false;
+ }
+
+ if (NULL == new_pages &&
+ false != extend)
+ {
+ return false;
+ }
+
+ map = get_mapping(ctx, mve_addr);
+ if (NULL == map)
+ {
+ return false;
+ }
+
+ if (num_pages > map->max_pages)
+ {
+ /* Trying to increase mapping past the maximum size */
+ return false;
+ }
+
+ if (map->num_pages != num_pages)
+ {
+ bool delete_old_page_array = false;
+ int new_pages_start = 0;
+
+ if (NULL == new_pages)
+ {
+ /* Reuse the old pages. Allocate/free pages as needed */
+ if (map->num_pages > num_pages)
+ {
+ /* Area is shrinking */
+ uint32_t i;
+
+ /* We'll free unused pages so make sure this mapping is marked
+ * as FREE_ON_UNMAP */
+ WARN_ON(map->type != FREE_ON_UNMAP);
+
+ /* The difference is map->num_pages - num_pages. Delete the last
+ * ones in the pagelist */
+ for (i = num_pages; i < map->num_pages; ++i)
+ {
+ MVE_RSRC_MEM_FREE_PAGE(map->pages[i]);
+ map->pages[i] = 0;
+ }
+
+ new_pages = map->pages;
+ }
+ else if (map->num_pages < num_pages)
+ {
+ /* Area is increasing */
+ uint32_t i;
+
+ /* Warn if the array and pages are not marked for deallocation
+ * upon unmap since this will result in a memory leak. */
+ WARN_ON(map->type != FREE_ON_UNMAP);
+
+ /* Area is increasing, by num_pages - map->num_pages */
+ new_pages = vmalloc(sizeof(phys_addr_t) * num_pages);
+ if (NULL == new_pages)
+ {
+ return false;
+ }
+
+ /* Copy the old pages */
+ memcpy(new_pages, map->pages, sizeof(phys_addr_t) * map->num_pages);
+
+ for (i = map->num_pages; i < num_pages; ++i)
+ {
+ new_pages[i] = MVE_RSRC_MEM_ALLOC_PAGE();
+ }
+
+ delete_old_page_array = true;
+ }
+ }
+ else
+ {
+ if (FREE_ON_UNMAP == map->type)
+ {
+ /* Resize with a new page array and FREE_ON_UNMAP is not supported.
+ * The reason is that the code currently doesn't support detecting
+ * whether individual pages are reused or not. Currently there are
+ * no use cases for this so this feature is unimplemented for now. */
+ WARN_ON(true);
+ return false;
+ }
+ }
+
+ curr_addr = mve_addr;
+ if (false != extend)
+ {
+ new_pages_start = map->num_pages;
+ }
+ for (i = new_pages_start; i < map->max_pages; ++i)
+ {
+ if (i < min(map->num_pages, num_pages) &&
+ map->pages[i] == new_pages[i])
+ {
+ /* This page is being reused */
+ continue;
+ }
+ else if (i > max(map->num_pages, num_pages))
+ {
+ /* The rest of the entries will just be reservation pages. The
+ * MMU table has already been setup for these. */
+ break;
+ }
+ else
+ {
+ phys_addr_t l2_line_addr = get_l2_line_addr(ctx, mve_addr + i * MVE_MMU_PAGE_SIZE);
+ /* No need to check l2_line_addr because this address already
+ * contains an old entry referencing either a page or a reservation
+ * page */
+
+ mve_mmu_entry_t entry = mve_rsrc_mem_read32(l2_line_addr);
+
+ if (i < num_pages)
+ {
+ /* Map this entry to a real page */
+ entry = mve_mmu_make_l1l2_entry(attrib, new_pages[i - new_pages_start], access);
+ }
+ else
+ {
+ /* Map this entry to a reservation page */
+ entry = mve_mmu_make_l1l2_entry(attrib, ctx->reservation, access);
+ }
+
+ mve_rsrc_mem_write32(l2_line_addr, entry);
+ mve_rsrc_mem_clean_cache_range(l2_line_addr, sizeof(mve_mmu_entry_t));
+ }
+ }
+
+ if (FREE_ON_UNMAP == map->type)
+ {
+ if (false != delete_old_page_array)
+ {
+ vfree(map->pages);
+ }
+
+ map->pages = new_pages;
+ }
+ map->num_pages = num_pages;
+ }
+
+ return true;
+}
+#endif
+
+phys_addr_t mve_mmu_get_id(struct mve_mmu_ctx *ctx)
+{
+ if (NULL == ctx)
+ {
+ return 0;
+ }
+
+#ifdef EMULATOR
+ return ctx->mmu_id;
+#else
+ return ctx->l1_page;
+#endif
+}
+
+phys_addr_t mve_mmu_get_l1_page(struct mve_mmu_ctx *ctx)
+{
+ if (NULL == ctx)
+ {
+ return 0;
+ }
+
+ return ctx->l1_page;
+}
+
+bool mve_mmu_read_buffer(struct mve_mmu_ctx *ctx,
+ mve_addr_t src,
+ uint8_t *dst,
+ uint32_t size)
+{
+ while (size > 0)
+ {
+ void *ptr;
+ uint32_t copy_size;
+ mve_mmu_entry_t l2_entry;
+ phys_addr_t paddr;
+ phys_addr_t l2addr = get_l2_line_addr(ctx, src);
+ if (0 == l2addr)
+ {
+ return false;
+ }
+
+ l2_entry = (mve_mmu_entry_t)mve_rsrc_mem_read32(l2addr);
+ paddr = mve_mmu_entry_get_paddr(l2_entry);
+ if (0 == paddr)
+ {
+ return false;
+ }
+
+ copy_size = min(size, MVE_MMU_PAGE_SIZE - (src & (MVE_MMU_PAGE_SIZE - 1)));
+
+ mve_rsrc_mem_invalidate_cache_range(paddr, copy_size);
+ ptr = mve_rsrc_mem_cpu_map_page(paddr);
+ if (NULL == ptr)
+ {
+ /* Mapping failed; do not silently skip the copy */
+ return false;
+ }
+ memcpy(dst, ptr, copy_size);
+ mve_rsrc_mem_cpu_unmap_page(paddr);
+
+ src += copy_size;
+ dst += copy_size;
+ size -= copy_size;
+ }
+
+ return true;
+}
diff --git a/drivers/video/arm/v5xx/base/mve_mmu.h b/drivers/video/arm/v5xx/base/mve_mmu.h
new file mode 100644
index 000000000000..42b4ebfd924b
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_mmu.h
@@ -0,0 +1,383 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_MMU_H
+#define MVE_MMU_H
+
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_mem_region.h"
+#include "mve_buffer.h"
+
+/* The following code assumes 4 kB pages and that the MVE uses a 32-bit
+ * virtual address space. */
+
+#define MVE_MMU_PAGE_SHIFT 12
+#define MVE_MMU_PAGE_SIZE (1 << MVE_MMU_PAGE_SHIFT)
+
+#define MVE_MMU_L2_CHUNK_SHIFT 22
+#define MVE_MMU_L2_CHUNK_SIZE (1 << MVE_MMU_L2_CHUNK_SHIFT)
+
+/* Size of the MVE MMU page table entry in bytes */
+#define MVE_MMU_PAGE_TABLE_ENTRY_SIZE 4
+
+enum mve_mmu_attrib
+{
+ ATTRIB_PRIVATE = 0,
+ ATTRIB_REFFRAME = 1,
+ ATTRIB_SHARED_RO = 2,
+ ATTRIB_SHARED_RW = 3
+};
+
+enum mve_mmu_access
+{
+ ACCESS_NO = 0,
+ ACCESS_READ_ONLY = 1,
+ ACCESS_EXECUTABLE = 2,
+ ACCESS_READ_WRITE = 3
+};
+
+enum mve_mmu_alloc
+{
+ ALLOC_NORMAL = 0,
+ ALLOC_EXTERNAL = 1
+};
+
+typedef uint32_t mve_mmu_entry_t;
+
+#define MVE_MMU_PADDR_MASK 0x3FFFFFFCLLU
+#define MVE_MMU_PADDR_SHIFT 2
+#define MVE_MMU_ATTRIBUTE_MASK 0xC0000000LLU
+#define MVE_MMU_ATTRIBUTE_SHIFT 30
+#define MVE_MMU_ACCESS_MASK 0x3
+#define MVE_MMU_ACCESS_SHIFT 0
+
+/**
+ * Extracts the physical address from an L1/L2 page table entry.
+ * @param entry The L1/L2 page table entry.
+ * @return The physical address stored in the page table entry.
+ */
+static inline phys_addr_t mve_mmu_entry_get_paddr(mve_mmu_entry_t entry)
+{
+ phys_addr_t pa_entry = (phys_addr_t)entry;
+ return (((pa_entry & MVE_MMU_PADDR_MASK) >> MVE_MMU_PADDR_SHIFT) << MVE_MMU_PAGE_SHIFT);
+}
+
+/**
+ * Extracts the L1/L2 page table entry attribute field.
+ * @param entry The L1/L2 page table entry.
+ * @return The attribute parameter stored in the page table entry.
+ */
+static inline enum mve_mmu_attrib mve_mmu_entry_get_attribute(mve_mmu_entry_t entry)
+{
+ return ((entry & MVE_MMU_ATTRIBUTE_MASK) >> MVE_MMU_ATTRIBUTE_SHIFT);
+}
+
+/**
+ * Extracts the L1/L2 page table entry access field.
+ * @param entry The L1/L2 page table entry.
+ * @return The access parameter stored in the page table entry.
+ */
+static inline enum mve_mmu_access mve_mmu_entry_get_access(mve_mmu_entry_t entry)
+{
+ return ((entry & MVE_MMU_ACCESS_MASK) >> MVE_MMU_ACCESS_SHIFT);
+}
+
+/**
+ * Calculates the physical address of the L1 page table entry that corresponds
+ * to a given MVE address.
+ * @param l1_page Physical address of the L1 page.
+ * @param mve_addr MVE virtual address.
+ * @return The physical address of the L1 page table entry.
+ */
+static inline phys_addr_t mve_mmu_l1_entry_addr_from_mve_addr(phys_addr_t l1_page,
+ mve_addr_t mve_addr)
+{
+ return l1_page + MVE_MMU_PAGE_TABLE_ENTRY_SIZE * (mve_addr >> MVE_MMU_L2_CHUNK_SHIFT);
+}
+
+/**
+ * Calculates the L1 page entry index that corresponds
+ * to a given MVE address.
+ * @param l1_page Physical address of the L1 page.
+ * @param mve_addr MVE virtual address.
+ * @return The index of the L1 page table entry.
+ */
+static inline uint32_t mve_mmu_l1_entry_index_from_mve_addr(phys_addr_t l1_page,
+ mve_addr_t mve_addr)
+{
+ return (mve_addr >> MVE_MMU_L2_CHUNK_SHIFT);
+}
+
+/**
+ * Calculates the physical address of the L2 page table entry that corresponds
+ * to a given MVE address.
+ * @param l2_page Physical address of the L2 page.
+ * @param mve_addr MVE virtual address.
+ * @return The physical address of the L2 page table entry.
+ */
+static inline phys_addr_t mve_mmu_l2_entry_addr_from_mve_addr(phys_addr_t l2_page,
+ mve_addr_t mve_addr)
+{
+ return l2_page + MVE_MMU_PAGE_TABLE_ENTRY_SIZE *
+ ((mve_addr & (MVE_MMU_L2_CHUNK_SIZE - 1)) >> MVE_MMU_PAGE_SHIFT);
+}
+
+/**
+ * Helper function for creating the MMU page table entry that shall be written
+ * to the MMUCTRL register.
+ * @param attrib MMU page table entry attribute.
+ * @param l1_page_addr Physical address of the L1 page.
+ * @return The L0 page table entry.
+ */
+static inline mve_mmu_entry_t mve_mmu_make_l0_entry(enum mve_mmu_attrib attrib, phys_addr_t l1_page_addr)
+{
+ return ((((mve_mmu_entry_t)attrib) << MVE_MMU_ATTRIBUTE_SHIFT) & MVE_MMU_ATTRIBUTE_MASK) |
+ ((mve_mmu_entry_t)((l1_page_addr >> (MVE_MMU_PAGE_SHIFT - MVE_MMU_PADDR_SHIFT)) & MVE_MMU_PADDR_MASK)) |
+ ACCESS_READ_WRITE;
+}
+
+/**
+ * Helper function for creating a MMU L1/L2 page table entry.
+ * @param attrib MMU page table entry attribute.
+ * @param paddr The physical address of the L2 page in case of an
+ * L1 page table entry, or the physical address of an allocated
+ * page in case of an L2 page table entry.
+ * @param access MMU page table entry access attribute.
+ * @return The L1 page table entry.
+ */
+static inline mve_mmu_entry_t mve_mmu_make_l1l2_entry(enum mve_mmu_attrib attrib,
+ phys_addr_t paddr,
+ enum mve_mmu_access access)
+{
+ return ((((mve_mmu_entry_t)attrib) << MVE_MMU_ATTRIBUTE_SHIFT) & MVE_MMU_ATTRIBUTE_MASK) |
+ ((mve_mmu_entry_t)((paddr >> (MVE_MMU_PAGE_SHIFT - MVE_MMU_PADDR_SHIFT)) & MVE_MMU_PADDR_MASK)) |
+ ((((mve_mmu_entry_t)access) << MVE_MMU_ACCESS_SHIFT) & MVE_MMU_ACCESS_MASK);
+}
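+
+/* Illustrative sketch (not part of the driver): an entry built with
+ * mve_mmu_make_l1l2_entry round-trips through the accessors above. The
+ * physical address is a made-up, page-aligned example value.
+ *
+ *   mve_mmu_entry_t e = mve_mmu_make_l1l2_entry(ATTRIB_PRIVATE,
+ *                                               0x80001000,
+ *                                               ACCESS_READ_WRITE);
+ *   mve_mmu_entry_get_paddr(e)     == 0x80001000
+ *   mve_mmu_entry_get_attribute(e) == ATTRIB_PRIVATE
+ *   mve_mmu_entry_get_access(e)    == ACCESS_READ_WRITE
+ */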
+
+/**
+ * Calculates the offset of the physical address from the page start.
+ * @param addr The physical address.
+ * @return The offset of the address from the page start.
+ */
+static inline uint32_t mve_mmu_get_offset_in_page(phys_addr_t addr)
+{
+ return (uint32_t)(addr & (MVE_MMU_PAGE_SIZE - 1));
+}
+
+struct mve_mmu_mapping; /* Forward declaration */
+
+/**
+ * Structure defining an MMU context.
+ */
+struct mve_mmu_ctx
+{
+ phys_addr_t l1_page; /**< Start address of the L1 page */
+ enum mve_mmu_alloc *l1_l2_alloc; /**< Allocation type of l2 page for each L1 entry */
+ phys_addr_t reservation; /**< Page used to mark reserved L2 entries */
+ struct mve_mmu_mapping *mappings; /**< Book keeping data on mapped memory */
+
+#ifdef EMULATOR
+ phys_addr_t mmu_id; /**< The emulator creates an MMU ID based on
+ * the l1_page when initialized. The ID is
+ * used as a context identifier. Store the
+ * ID in this member. */
+#endif
+};
+
+/**
+ * Create a MMU context.
+ * @return The created MMU context struct.
+ */
+struct mve_mmu_ctx *mve_mmu_create_ctx(void);
+
+/**
+ * Destroy a MMU context and free all referenced resources.
+ * @param ctx The MMU context to destroy.
+ */
+void mve_mmu_destroy_ctx(struct mve_mmu_ctx *ctx);
+
+/**
+ * Merge a preinitialized L2 page into the page table. Note that the client
+ * is responsible for freeing the supplied l2_page after this function has
+ * returned. The client is responsible for freeing the mapped pages when
+ * they are no longer needed (i.e. after the unmap operation).
+ * @param ctx The MMU context.
+ * @param l2_page The preinitialized L2 page to merge into the page table.
+ * @param mve_addr Where in MVE virtual address space to merge the page.
+ * @param num_pages Number of page entries to merge.
+ * @return True on success, false on failure.
+ */
+bool mve_mmu_map_pages_merge(struct mve_mmu_ctx *ctx,
+ phys_addr_t l2_page,
+ mve_addr_t mve_addr,
+ uint32_t num_pages);
+
+/**
+ * Create L1 page table using l2_page address provided by secure OS/client.
+ * The client is responsible for freeing the mapped pages when
+ * they are no longer needed (i.e. after the unmap operation).
+ * @param ctx The MMU context.
+ * @param l2_page The preinitialized L2 page to add into the page table.
+ * @param mve_addr Where in MVE virtual address space to add the page.
+ * @return True on success, false on failure.
+ */
+bool mve_mmu_map_page_replace(struct mve_mmu_ctx *ctx,
+ phys_addr_t l2_page,
+ mve_addr_t mve_addr);
+
+/**
+ * Maps an array of physical pages into MVE address space. No extra space
+ * reservations are made which means that a future resize will most likely
+ * fail. Use mve_mmu_map_pages_and_reserve in these cases. Note that mve_addr
+ * must be page aligned or this function will fail. If free_pages_on_unmap is true,
+ * the MMU module takes over as owner of both the array and the allocated pages.
+ * The client shall in this case not free any of these resources.
+ * @param ctx The MMU context.
+ * @param pages List of physical pages to map.
+ * @param mve_addr The start address in MVE virtual space where to map the pages.
+ * Must be page aligned.
+ * @param num_pages Number of pages to map.
+ * @param attrib MMU attribute settings.
+ * @param access MMU access settings.
+ * @param free_pages_on_unmap Whether the pages backing this mapping shall
+ * be freed on unmap.
+ * @return True on success, false on failure.
+ */
+bool mve_mmu_map_pages(struct mve_mmu_ctx *ctx,
+ phys_addr_t *pages,
+ mve_addr_t mve_addr,
+ uint32_t num_pages,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool free_pages_on_unmap);
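+
+/* Illustrative sketch (not part of the driver): mapping two pages at a
+ * made-up, page-aligned MVE virtual address with the client keeping
+ * ownership of the pages (free_pages_on_unmap == false).
+ *
+ *   phys_addr_t pages[2];
+ *   bool ok;
+ *
+ *   pages[0] = MVE_RSRC_MEM_ALLOC_PAGE();
+ *   pages[1] = MVE_RSRC_MEM_ALLOC_PAGE();
+ *   ok = mve_mmu_map_pages(ctx, pages, 0x10000000, 2,
+ *                          ATTRIB_PRIVATE, ACCESS_READ_WRITE, false);
+ *   ...
+ *   mve_mmu_unmap_pages(ctx, 0x10000000);
+ *   the client must still free pages[0] and pages[1]
+ */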
+
+/**
+ * Remove MVE virtual address space mapping. This function also removes mapping
+ * reservations. Note that this function does not free the unmapped pages unless
+ * they were mapped with free_pages_on_unmap set to true.
+ * @param ctx The MMU context.
+ * @param mve_addr The start address in MVE virtual space of the mapping.
+ */
+void mve_mmu_unmap_pages(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr);
+
+/**
+ * Maps a list of physical pages into MVE address space and reserves pages
+ * for future resize operations. The physical pages will be mapped somewhere
+ * in the region specified by the region parameter and the virtual start
+ * address of the mapping is guaranteed to be aligned according to the alignment
+ * parameter. Note that the region boundaries must be page aligned or the map
+ * will fail. If free_pages_on_unmap is true, the MMU module takes over as owner
+ * of both the array and the allocated pages. The client shall in this case not
+ * free any of these resources.
+ * @param[in] ctx The MMU context.
+ * @param[in] pages List of physical pages to map.
+ * @param[in,out] region Defining the region where the mapping must be
+ * performed. Note that the region must be page
+ * aligned. This parameter will contain the actual
+ * mapping region if the call returns successfully.
+ * @param[in] num_pages Number of physical pages to map.
+ * @param[in] max_pages max_pages - num_pages is the number of pages to
+ * reserve in order to support future resize operations.
+ * @param[in] alignment Alignment of the mapping. The value must be a power
+ * of 2, or 0 which results in 1-byte alignment.
+ * @param[in] attrib MMU attribute settings.
+ * @param[in] access MMU access settings.
+ * @param[in] free_pages_on_unmap Whether the pages backing this mapping shall be
+ * freed on unmap.
+ * @return True on success, false on failure. On success, region will contain
+ * the start and end addresses of the mapping (excluding reserved pages).
+ */
+bool mve_mmu_map_pages_and_reserve(struct mve_mmu_ctx *ctx,
+ phys_addr_t *pages,
+ struct mve_mem_virt_region *region,
+ uint32_t num_pages,
+ uint32_t max_pages,
+ uint32_t alignment,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool free_pages_on_unmap);
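+
+/* Illustrative sketch (not part of the driver): mapping one page somewhere
+ * in a region while reserving room to later grow to four pages with
+ * mve_mmu_map_resize. The region choice and page counts are made-up
+ * examples; since free_pages_on_unmap is true here, pages must be a
+ * heap-allocated array because the MMU module takes ownership of it.
+ *
+ *   struct mve_mem_virt_region region;
+ *   bool ok;
+ *
+ *   mve_mem_virt_region_get(fw_prot, VIRT_MEM_REGION_OUT_BUF, &region);
+ *   ok = mve_mmu_map_pages_and_reserve(ctx, pages, &region,
+ *                                      1, 4, MVE_MMU_PAGE_SIZE,
+ *                                      ATTRIB_PRIVATE, ACCESS_READ_WRITE,
+ *                                      true);
+ *   on success, region.start..region.end covers the one mapped page and
+ *   three more pages remain reserved for a future resize
+ */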
+
+/**
+ * Return info about the current mapping, if one exists, for the given mve_addr.
+ * @param ctx The MMU context.
+ * @param mve_addr Address in MVE virtual address space of the first page of
+ * the mapping.
+ * @param num_pages Filled with the number of physical pages in the mapping.
+ * @param max_pages Filled with the maximum number of physical pages this
+ * mapping has reserved.
+ *
+ * @return True on success, false on failure.
+ */
+bool mve_mmu_map_info(struct mve_mmu_ctx *ctx,
+ mve_addr_t mve_addr,
+ uint32_t *num_pages,
+ uint32_t *max_pages);
+
+/**
+ * Resizes an existing mapping of physical pages in MVE address
+ * space. New pages will be allocated as needed if new_pages is NULL. The resize
+ * operation will fail if num_pages is larger than the value specified for max_pages
+ * when the initial mapping was made. If num_pages is the same as the current mapping
+ * then nothing will happen. You can therefore not use this function to switch
+ * physical pages. This function does not alter the owner property
+ * specified in the initial mapping operation. If a client resizes a mapping
+ * with free_pages_on_unmap set to false, the client is responsible for freeing
+ * the original pages and array after this function returns.
+ * @param ctx The MMU context.
+ * @param new_pages The new pages.
+ * @param mve_addr Address in MVE virtual address space to the first page of
+ * the mapping.
+ * @param num_pages Number of physical pages to map.
+ * @param attrib MMU attribute settings.
+ * @param access MMU access settings.
+ * @param extend Extend an old mapping. Only valid if new_pages != NULL
+ * and map->type != FREE_ON_UNMAP.
+ * @return True on success, false on failure.
+ */
+bool mve_mmu_map_resize(struct mve_mmu_ctx *ctx,
+ phys_addr_t *new_pages,
+ mve_addr_t mve_addr,
+ uint32_t num_pages,
+ enum mve_mmu_attrib attrib,
+ enum mve_mmu_access access,
+ bool extend);
+
+/**
+ * Returns the ID of the L1 page. This value shall be written to the video HW.
+ * @param ctx The MMU context.
+ * @return The ID of the L1 page.
+ */
+phys_addr_t mve_mmu_get_id(struct mve_mmu_ctx *ctx);
+
+/**
+ * Returns the physical address of the L1 page.
+ * @param ctx The MMU context.
+ * @return The physical address of the L1 page or 0 if no page exists.
+ */
+phys_addr_t mve_mmu_get_l1_page(struct mve_mmu_ctx *ctx);
+
+/**
+ * Copy data from a memory region pointed out by an MVE virtual address to
+ * a CPU buffer.
+ * @param ctx The MMU context.
+ * @param src MVE virtual address of the source buffer.
+ * @param dst CPU pointer of the destination buffer.
+ * @param size Number of bytes to copy.
+ * @return True on success, false on failure.
+ */
+bool mve_mmu_read_buffer(struct mve_mmu_ctx *ctx,
+ mve_addr_t src,
+ uint8_t *dst,
+ uint32_t size);
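+
+/* Illustrative sketch (not part of the driver): copying 16 bytes (a
+ * made-up size) from MVE virtual address space into a CPU buffer.
+ *
+ *   uint8_t buf[16];
+ *
+ *   if (false == mve_mmu_read_buffer(ctx, mve_addr, buf, sizeof(buf)))
+ *   {
+ *       handle the error
+ *   }
+ */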
+
+#endif /* MVE_MMU_H */
diff --git a/drivers/video/arm/v5xx/base/mve_queue.c b/drivers/video/arm/v5xx/base/mve_queue.c
new file mode 100644
index 000000000000..2cf493d90def
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_queue.c
@@ -0,0 +1,232 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/slab.h>
+#include <linux/sched.h>
+#endif
+
+#include "mve_base.h"
+#include "mve_queue.h"
+#include "mve_rsrc_mem_frontend.h"
+
+struct mve_queue *mve_queue_create(uint32_t size)
+{
+ struct mve_queue *queue;
+
+ queue = MVE_RSRC_MEM_ZALLOC(sizeof(struct mve_queue), GFP_KERNEL);
+ if (NULL == queue)
+ {
+ goto error;
+ }
+
+ queue->buffer = MVE_RSRC_MEM_VALLOC(size * sizeof(uint32_t));
+ if (NULL == queue->buffer)
+ {
+ goto error;
+ }
+
+ sema_init(&queue->consumer_lock, 1);
+ init_waitqueue_head(&queue->wait_queue);
+
+ queue->buffer_size = size;
+ queue->rpos = 0;
+ queue->wpos = 0;
+ queue->interrupted = false;
+
+ return queue;
+
+error:
+ if (NULL != queue)
+ {
+ if (NULL != queue->buffer)
+ {
+ MVE_RSRC_MEM_VFREE(queue->buffer);
+ }
+
+ MVE_RSRC_MEM_FREE(queue);
+ }
+
+ return NULL;
+}
+
+void mve_queue_destroy(struct mve_queue *queue)
+{
+ if (NULL != queue)
+ {
+ MVE_RSRC_MEM_VFREE(queue->buffer);
+ MVE_RSRC_MEM_FREE(queue);
+ }
+}
+
+/**
+ * Returns the event header for the current event.
+ * @param queue The queue.
+ * @return The event header of the current event.
+ */
+static struct mve_base_event_header get_event_header(struct mve_queue *queue)
+{
+ uint32_t buf_size = queue->buffer_size;
+
+ return *((struct mve_base_event_header *)&queue->buffer[queue->rpos % buf_size]);
+}
+
+void mve_queue_interrupt(struct mve_queue *queue)
+{
+ queue->interrupted = true;
+ wake_up_interruptible(&queue->wait_queue);
+}
+
+bool mve_queue_interrupted(struct mve_queue *queue)
+{
+ return queue->interrupted;
+}
+
+struct mve_base_event_header *mve_queue_wait_for_event(struct mve_queue *queue, uint32_t timeout)
+{
+ struct mve_base_event_header *event = NULL;
+ struct mve_base_event_header header;
+ uint32_t buf_size;
+ uint32_t words;
+ uint32_t i;
+ uint32_t *dst;
+ int ret;
+ int sem_taken;
+
+ if (NULL == queue)
+ {
+ return NULL;
+ }
+
+ if (0 != timeout)
+ {
+ queue->interrupted = false;
+ /* Wait for items to process */
+ ret = wait_event_interruptible_timeout(queue->wait_queue,
+ (0 < (queue->wpos - queue->rpos) || false != queue->interrupted),
+ msecs_to_jiffies(timeout));
+ if (1 > ret)
+ {
+ return NULL;
+ }
+ }
+
+ sem_taken = down_interruptible(&queue->consumer_lock);
+
+ if (0 == queue->wpos - queue->rpos)
+ {
+ goto exit;
+ }
+
+ header = get_event_header(queue);
+ words = (header.size + sizeof(uint32_t) - 1) / sizeof(uint32_t);
+
+ event = mve_queue_create_event(header.code, header.size);
+ if (NULL == event)
+ {
+ /* Failed to allocate memory. Skip this event */
+ goto out;
+ }
+
+ /* Copy data to return buffer */
+ buf_size = queue->buffer_size;
+ dst = (uint32_t *)event->data;
+ for (i = 0; i < words; ++i)
+ {
+ dst[i] = queue->buffer[(sizeof(struct mve_base_event_header) / sizeof(uint32_t) +
+ queue->rpos + i) % buf_size];
+ }
+
+out:
+ queue->rpos += (sizeof(struct mve_base_event_header) + words * sizeof(uint32_t)) /
+ sizeof(uint32_t);
+exit:
+ if (0 == sem_taken)
+ {
+ up(&queue->consumer_lock);
+ }
+
+ return event;
+}
+
+void mve_queue_add_event(struct mve_queue *queue,
+ struct mve_base_event_header *event)
+{
+ struct mve_base_event_header *header;
+ uint32_t buf_size;
+ uint32_t words, i;
+ uint32_t *dst, *src;
+ int sem_taken;
+
+ if (NULL == queue || NULL == event)
+ {
+ return;
+ }
+
+ dst = (uint32_t *)queue->buffer;
+ src = (uint32_t *)event->data;
+
+ words = (event->size + sizeof(uint32_t) - 1) / sizeof(uint32_t);
+
+ sem_taken = down_interruptible(&queue->consumer_lock);
+ if (0 != sem_taken)
+ {
+ /* Drop this event! */
+ return;
+ }
+
+ buf_size = queue->buffer_size;
+ if (queue->wpos + sizeof(struct mve_base_event_header) / sizeof(uint32_t) + words -
+ queue->rpos > buf_size)
+ {
+ /* Drop this event! */
+ /* WARN_ON(true); */
+ up(&queue->consumer_lock);
+ return;
+ }
+
+ header = (struct mve_base_event_header *)(dst + (queue->wpos++ % buf_size));
+ header->code = event->code;
+ header->size = event->size;
+
+ for (i = 0; i < words; ++i)
+ {
+ dst[queue->wpos++ % buf_size] = src[i];
+ }
+
+ up(&queue->consumer_lock);
+ /* Notify listeners that an item has been added to the queue */
+ wake_up_interruptible(&queue->wait_queue);
+}
+
+struct mve_base_event_header *mve_queue_create_event(int code, int size)
+{
+ struct mve_base_event_header *event;
+
+ event = MVE_RSRC_MEM_CACHE_ALLOC(EVENT_SIZE(size), GFP_KERNEL);
+ if (NULL != event)
+ {
+ event->code = code;
+ event->size = size;
+ }
+
+ return event;
+}
+
+bool mve_queue_event_ready(struct mve_queue *queue)
+{
+ return 0 < (queue->wpos - queue->rpos);
+}
diff --git a/drivers/video/arm/v5xx/base/mve_queue.h b/drivers/video/arm/v5xx/base/mve_queue.h
new file mode 100644
index 000000000000..7eb2c41ee967
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_queue.h
@@ -0,0 +1,112 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_QUEUE_H
+#define MVE_QUEUE_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/wait.h>
+#endif
+
+#define EVENT_SIZE(_size) \
+    (sizeof(struct mve_base_event_header) + \
+    (((_size) + sizeof(uint32_t) - 1) / sizeof(uint32_t)) * sizeof(uint32_t))
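+
+/* Worked example (editorial note; a 4-byte mve_base_event_header is assumed
+ * purely for the arithmetic): EVENT_SIZE(6) = 4 + ((6 + 3) / 4) * 4 = 12
+ * bytes, i.e. the payload is rounded up to a whole number of 32-bit words so
+ * that every event occupies an integral number of words in the ring buffer. */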
+
+/**
+ * This struct is used to store events received by the interrupt handler
+ * from the MVE hardware.
+ */
+struct mve_queue
+{
+ uint32_t *buffer; /**< Pointer to a ring buffer */
+ uint32_t buffer_size; /**< Size of the ring buffer in 32-bit words */
+ uint32_t rpos; /**< Tail of the ring buffer */
+ uint32_t wpos; /**< Head of the ring buffer */
+ bool interrupted; /**< Set if wait was interrupted */
+
+ struct semaphore consumer_lock; /**< Used to prevent concurrent modifications to the queue */
+    wait_queue_head_t wait_queue;   /**< Waited on until there are pending messages to process */
+};
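+
+/* Editorial note on the bookkeeping above: rpos and wpos are free-running
+ * 32-bit word counters, not wrapped indices. A slot is addressed as
+ * buffer[pos % buffer_size], the number of unread words is wpos - rpos
+ * (well defined even across unsigned wrap-around), and an event is dropped
+ * when enqueueing it would make wpos - rpos exceed buffer_size. */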
+
+/**
+ * Create a new queue.
+ * @param size Size of the internal ring buffer in 32-bit words.
+ * @returns A pointer to the new mve_queue, or NULL on failure.
+ */
+struct mve_queue *mve_queue_create(uint32_t size);
+
+/**
+ * Destroy a queue created by mve_queue_create.
+ * @param queue The queue to destroy.
+ */
+void mve_queue_destroy(struct mve_queue *queue);
+
+/**
+ * Wait until there is an unprocessed event in the queue. This function will
+ * return immediately if there is an unprocessed event in the queue. Otherwise
+ * it will wait until an event is added to the queue or the timeout expires.
+ * If the wait is interrupted or the timeout expires, this function returns NULL.
+ * @param queue The queue.
+ * @param timeout Timeout in milliseconds.
+ * @return The event or NULL in case of an error.
+ * The client is responsible for freeing the pointer if it's not NULL.
+ */
+struct mve_base_event_header *mve_queue_wait_for_event(struct mve_queue *queue, uint32_t timeout);
+
+/**
+ * Interrupt the wait queue and exit the wait by setting the interrupted flag.
+ * @param queue The queue.
+ */
+void mve_queue_interrupt(struct mve_queue *queue);
+
+/**
+ * Helper function that tells whether the queue was interrupted during the
+ * previous wait.
+ * @param queue The queue.
+ * @return True if the wait was interrupted by a call to mve_queue_interrupt,
+ *         false otherwise.
+ */
+bool mve_queue_interrupted(struct mve_queue *queue);
+
+/**
+ * Add an item to the queue. This function copies the content of the event
+ * to the queue and it's the responsibility of the client to delete the
+ * event object after the call has returned.
+ * @param queue The queue.
+ * @param event The event to enqueue.
+ */
+void mve_queue_add_event(struct mve_queue *queue,
+ struct mve_base_event_header *event);
+
+/**
+ * Helper function that allocates a mve_base_event_header and initializes the code
+ * and size members.
+ * @param code The value to assign to the code member.
+ * @param size The value to assign to the size member.
+ * @return A pointer to a mve_base_event_header structure or NULL on failure.
+ */
+struct mve_base_event_header *mve_queue_create_event(int code, int size);
+
+/**
+ * Returns true if the queue contains an unprocessed event.
+ * @param queue The queue.
+ * @return True if the queue contains an unprocessed event. False otherwise.
+ */
+bool mve_queue_event_ready(struct mve_queue *queue);
+
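+/* Typical producer/consumer usage of this API, sketched by the editor from
+ * the declarations above and the call sites in mve_queue.c/mve_session.c
+ * (illustrative only; the queue size, timeout and event code are made up):
+ *
+ *   struct mve_queue *q = mve_queue_create(1024);
+ *
+ *   // Producer (e.g. an interrupt worker):
+ *   struct mve_base_event_header *ev = mve_queue_create_event(code, size);
+ *   if (NULL != ev)
+ *   {
+ *       mve_queue_add_event(q, ev);  // the event contents are copied
+ *       MVE_RSRC_MEM_CACHE_FREE(ev, EVENT_SIZE(ev->size));
+ *   }
+ *
+ *   // Consumer:
+ *   ev = mve_queue_wait_for_event(q, 100);  // timeout in milliseconds
+ *   if (NULL != ev)
+ *   {
+ *       // ... process the event, then free it ...
+ *       MVE_RSRC_MEM_CACHE_FREE(ev, EVENT_SIZE(ev->size));
+ *   }
+ *
+ *   mve_queue_destroy(q);
+ */
+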
+#endif /* MVE_QUEUE_H */
diff --git a/drivers/video/arm/v5xx/base/mve_session.c b/drivers/video/arm/v5xx/base/mve_session.c
new file mode 100644
index 000000000000..7ecd448a37c8
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_session.c
@@ -0,0 +1,3738 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#include <unistd.h>
+#else
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/debugfs.h>
+#endif
+
+#include "mve_session.h"
+#include "mve_fw.h"
+#include "mve_driver.h"
+#include "mve_com.h"
+#include "mve_mem_region.h"
+#include "mve_session_buffer.h"
+
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_mem_backend.h"
+
+#include "mve_rsrc_irq.h"
+#include "mve_rsrc_register.h"
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_pm.h"
+#include "mve_rsrc_log.h"
+
+#include <host_interface_v1/mve_protocol_kernel.h>
+
+/* List containing all allocated, but not freed, sessions */
+static LIST_HEAD(sessions);
+
+/* Semaphore protecting the sessions list */
+static struct semaphore sessions_sem;
+
+/* Number of sessions currently created */
+static int num_sessions;
+
+/* This define controls how many frames each job will process in a multisession
+ * scenario. */
+#define MULTI_SESSION_FRAME_COUNT 1
+
+/* Maximum size of a string name. */
+#define MAX_STRINGNAME_SIZE 128
+
+/**
+ * Request to fill or empty a buffer.
+ */
+struct session_enqueue_request
+{
+ struct list_head list;
+ struct mve_base_buffer_details param;
+};
+
+#ifndef DISABLE_WATCHDOG
+/* Maximum number of unanswered pings before the hardware is considered dead */
+#define MAX_NUM_PINGS 3
+
+/* Watchdog ping interval in milliseconds */
+#define WATCHDOG_PING_INTERVAL 3000
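+
+/* Editorial note: combined, these defaults mean a session is declared hung
+ * only after MAX_NUM_PINGS unanswered pings issued roughly
+ * WATCHDOG_PING_INTERVAL ms apart, i.e. on the order of 3 * 3000 ms = 9
+ * seconds of firmware silence before mve_session_hung() is invoked. */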
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+#ifdef EMULATOR
+extern struct semaphore watchdog_sem;
+#endif
+#endif
+
+/**
+ * @brief Info about the watchdog.
+ */
+struct watchdog_info
+{
+ struct task_struct *watchdog_thread; /**< Pointer to the watchdog thread.*/
+ struct semaphore killswitch; /**< Signal this to immediately terminate
+ * the watchdog thread (up(killswitch)) */
+ atomic_t refcount; /**< Reference-count for the watchdog */
+};
+
+static struct watchdog_info watchdog;
+
+#ifdef UNIT
+static bool watchdog_ignore_pong = false;
+#endif
+
+#endif /* DISABLE_WATCHDOG */
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+#define DVFS_DEBUG_MODE 1
+#else
+#define DVFS_DEBUG_MODE 0
+#endif
+
+#if (1 == DVFS_DEBUG_MODE)
+static atomic_t dvfs_b_enable = ATOMIC_INIT(1);
+#endif
+
+#define OMX_BUFFERFLAG_EOS 0x00000001
+
+/**
+ * Checks whether the supplied session is valid or not. Note that this function
+ * is not thread safe.
+ * @param session The session to check validity of.
+ * @return True if the session is valid, false otherwise.
+ */
+static bool is_session_valid(struct mve_session *session)
+{
+ struct list_head *pos;
+
+ if (NULL == session)
+ {
+ return false;
+ }
+
+ list_for_each(pos, &sessions)
+ {
+ struct mve_session *ptr = container_of(pos, struct mve_session, list);
+
+ if (session == ptr)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Frees all resources allocated for the session. This is an internal
+ * function called by kref_put when the reference counter for a session
+ * reaches 0.
+ * @param ref Pointer to the kref member of the mve_session structure.
+ */
+static void free_session(struct kref *ref)
+{
+ bool ret;
+ struct mve_mem_virt_region region;
+
+ struct mve_session *session = container_of(ref,
+ struct mve_session,
+ refcount);
+
+ /* Unmap all external buffers */
+ mve_session_buffer_unmap_all(session);
+
+ mve_queue_destroy(session->response_events);
+ mve_queue_destroy(session->mve_events);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_MSG_IN_QUEUE, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_MSG_OUT_QUEUE, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_INPUT_BUFFER_IN, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_INPUT_BUFFER_OUT, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_OUTPUT_BUFFER_IN, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_OUTPUT_BUFFER_OUT, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_RPC_QUEUE, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+
+ mve_rsrc_dma_mem_free(session->msg_in_queue);
+ mve_rsrc_dma_mem_free(session->msg_out_queue);
+ mve_rsrc_dma_mem_free(session->buf_input_in);
+ mve_rsrc_dma_mem_free(session->buf_input_out);
+ mve_rsrc_dma_mem_free(session->buf_output_in);
+ mve_rsrc_dma_mem_free(session->buf_output_out);
+ mve_rsrc_dma_mem_free(session->rpc_area);
+
+ MVE_RSRC_MEM_FREE(session->role);
+ ret = mve_fw_unload(session->mmu_ctx, session->fw_inst);
+ if (false == ret)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_session, MVE_LOG_DEBUG, "Firmware unload failed.");
+ }
+ mve_mmu_destroy_ctx(session->mmu_ctx);
+
+ MVE_RSRC_MEM_FREE(session);
+}
+
+/**
+ * Checks whether the supplied reference is a reference to a valid session. If
+ * the session is valid, the session's reference counter is incremented by one.
+ * You must call release_session when the reference to the session is dropped.
+ * @param session The session to check validity of.
+ * @return True if the session is valid, false otherwise.
+ */
+static bool acquire_session(struct mve_session *session)
+{
+ bool is_valid = false;
+ int sem_taken;
+
+ sem_taken = down_interruptible(&sessions_sem);
+
+ is_valid = is_session_valid(session);
+ if (true == is_valid)
+ {
+ kref_get(&session->refcount);
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+
+ return is_valid;
+}
+
+/**
+ * Release the reference to the session. If the session's reference counter
+ * reaches 0, then the session is freed.
+ * @param session The session to release.
+ */
+static void release_session(struct mve_session *session)
+{
+ kref_put(&session->refcount, free_session);
+}
+
+/**
+ * Acquires the session lock for a valid session. Note that the client has to
+ * release the session's semaphore when the session no longer has to be
+ * protected from concurrent access.
+ * @param session The session to lock.
+ * @return 0 if the semaphore was successfully acquired, non-zero on failure.
+ */
+static int lock_session(struct mve_session *session)
+{
+ return down_interruptible(&session->semaphore);
+}
+
+/**
+ * Unlock the session lock.
+ * @param session The session to unlock.
+ */
+static void unlock_session(struct mve_session *session)
+{
+ up(&session->semaphore);
+}
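+
+/* The four helpers above are used together in a fixed pattern throughout
+ * this file (sketch, mirroring e.g. mve_session_irq_callback):
+ *
+ *   if (false == acquire_session(session))  // validate and take a reference
+ *   {
+ *       return;
+ *   }
+ *   lock_session(session);                  // serialize access to the session state
+ *   // ... operate on the session ...
+ *   unlock_session(session);
+ *   release_session(session);               // drops the reference; may free the session
+ */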
+
+/**
+ * Convert MVE protocol response codes to event codes.
+ * @param response_code The MVE protocol response code to convert
+ * @returns The corresponding event code.
+ */
+static enum mve_base_event_code response_code_to_mve_event(uint32_t response_code)
+{
+ /* Defines copied from firmware interface v2. */
+#define MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM 2014
+#define MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS 2015
+#define MVE_RESPONSE_CODE_REF_FRAME_UNUSED 2018
+#define MVE_BUFFER_CODE_PARAM 3003
+
+ switch (response_code)
+ {
+ case MVE_RESPONSE_CODE_INPUT:
+ return MVE_BASE_EVENT_INPUT;
+ case MVE_RESPONSE_CODE_OUTPUT:
+ return MVE_BASE_EVENT_OUTPUT;
+ case MVE_RESPONSE_CODE_PROCESSED:
+ return MVE_BASE_EVENT_PROCESSED;
+ case MVE_RESPONSE_CODE_EVENT:
+ return MVE_BASE_EVENT_GENERIC;
+ case MVE_RESPONSE_CODE_SWITCHED_OUT:
+ return MVE_BASE_EVENT_SWITCHED_OUT;
+ case MVE_RESPONSE_CODE_SWITCHED_IN:
+ return MVE_BASE_EVENT_SWITCHED_IN;
+ case MVE_RESPONSE_CODE_ERROR:
+ return MVE_BASE_EVENT_ERROR;
+ case MVE_RESPONSE_CODE_PONG:
+ return MVE_BASE_EVENT_PONG;
+ case MVE_RESPONSE_CODE_STATE_CHANGE:
+ return MVE_BASE_EVENT_STATE_CHANGED;
+ case MVE_RESPONSE_CODE_GET_PARAMETER_REPLY:
+ return MVE_BASE_EVENT_GET_PARAMCONFIG;
+ case MVE_RESPONSE_CODE_SET_PARAMETER_REPLY:
+ return MVE_BASE_EVENT_SET_PARAMCONFIG;
+ case MVE_RESPONSE_CODE_GET_CONFIG_REPLY:
+ return MVE_BASE_EVENT_GET_PARAMCONFIG;
+ case MVE_RESPONSE_CODE_SET_CONFIG_REPLY:
+ return MVE_BASE_EVENT_SET_PARAMCONFIG;
+ case MVE_RESPONSE_CODE_INPUT_FLUSHED:
+ return MVE_BASE_EVENT_INPUT_FLUSHED;
+ case MVE_RESPONSE_CODE_OUTPUT_FLUSHED:
+ return MVE_BASE_EVENT_OUTPUT_FLUSHED;
+ case MVE_RESPONSE_CODE_DUMP:
+ return MVE_BASE_EVENT_CODE_DUMP;
+ case MVE_RESPONSE_CODE_JOB_DEQUEUED:
+ return MVE_BASE_EVENT_JOB_DEQUEUED;
+ case MVE_RESPONSE_CODE_IDLE:
+ return MVE_BASE_EVENT_IDLE;
+ case MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM:
+ return MVE_BASE_EVENT_ALLOC_PARAMS;
+ case MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS:
+ return MVE_BASE_EVENT_SEQUENCE_PARAMS;
+ case MVE_BUFFER_CODE_PARAM:
+ return MVE_BASE_EVENT_BUFFER_PARAM;
+ case MVE_RESPONSE_CODE_REF_FRAME_UNUSED:
+ return MVE_BASE_EVENT_REF_FRAME_RELEASED;
+ case MVE_RESPONSE_CODE_TRACE_BUFFERS:
+ return MVE_BASE_EVENT_FW_TRACE_BUFFERS;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ return -1;
+}
+
+/**
+ * Returns a string description of the supplied RPC code. Used when
+ * building debug binaries where each received RPC request is printed
+ * to the kernel log (dmesg).
+ * @param code RPC code.
+ * @return A textual description of the RPC code.
+ */
+static char *print_rpc_code(uint32_t code)
+{
+ switch (code)
+ {
+ case MVE_RPC_FUNCTION_DEBUG_PRINTF:
+ return "MVE_RPC_FUNCTION DEBUG_PRINTF";
+ case MVE_RPC_FUNCTION_MEM_ALLOC:
+ return "MVE_RPC_FUNCTION_MEM_ALLOC";
+ case MVE_RPC_FUNCTION_MEM_RESIZE:
+ return "MVE_RPC_FUNCTION_MEM_RESIZE";
+ case MVE_RPC_FUNCTION_MEM_FREE:
+ return "MVE_RPC_FUNCTION_MEM_FREE";
+ default:
+ return "UNKNOWN RPC";
+ }
+}
+
+/* State machine description table */
+static const bool state_table[7][7] =
+{
+ /* SWITCHED_OUT SWITCHING_IN SWITCHING_OUT WAITING_FOR_RESPONSE ACTIVE_JOB TO / FROM */
+
+ { true, true, false, false, true }, /* SWITCHED_OUT */
+ { false, false, false, true, true }, /* SWITCHING_IN */
+ { true, false, false, false, false }, /* SWITCHING_OUT */
+ { true, false, false, false, true }, /* WAITING_FOR_RESPONSE */
+ { true, false, true, true, true }, /* ACTIVE_JOB */
+};
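+
+/* Editorial example: a transition request is validated with a single lookup,
+ * state_table[from][to]. For instance, SWITCHED_OUT -> SWITCHING_IN is
+ * allowed (row 0, column 1 is true), while SWITCHING_OUT -> SWITCHING_IN is
+ * rejected (row 2, column 1 is false); see change_session_state() below. */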
+
+/* State to string. */
+static const char *state_to_char[] =
+{
+ "Switched out",
+ "Switching in",
+ "Switching out",
+ "Waiting for response",
+ "Active job",
+ "Quick flush"
+};
+
+/**
+ * Change the state of the session to the supplied state. The state is not changed
+ * if the transition is not supported.
+ * @param session The session to change state on.
+ * @param to_state The state to change to.
+ * @return True if the state transition is valid, false otherwise.
+ */
+static bool change_session_state(struct mve_session *session, enum MVE_SESSION_STATE to_state)
+{
+ bool ret;
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session, "Changing state. '%s' -> '%s'.", state_to_char[session->state.state], state_to_char[to_state]);
+
+ ret = state_table[session->state.state][to_state];
+ if (false != ret)
+ {
+ session->state.state = to_state;
+ }
+ else
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Invalid state change. '%s' -> '%s'.", state_to_char[session->state.state], state_to_char[to_state]);
+ }
+
+ return ret;
+}
+
+/**
+ * This function is called when a message response have been received and
+ * that the session state should be changed because of this. Note that
+ * this session assumes that the supplied session is valid!
+ * @param session The session that received the message response.
+ */
+static void response_received(struct mve_session *session)
+{
+ bool res = change_session_state(session, STATE_ACTIVE_JOB);
+ WARN_ON(false == res);
+}
+
+/**
+ * Returns the buffer descriptor (if one exists) for a user allocated buffer
+ * mapping matching the supplied MVE handle.
+ * @param session The session.
+ * @param mve_handle MVE handle of the user allocated buffer.
+ * @return The buffer descriptor if the supplied user allocated buffer has been
+ * mapped. NULL if no such descriptor exists.
+ */
+static struct mve_buffer_client *get_external_descriptor_by_mve_handle(struct mve_session *session,
+ uint16_t mve_handle)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, register_list);
+
+ if (mve_handle == ptr->mve_handle)
+ {
+ return ptr;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Returns the buffer descriptor (if one exists) for a user allocated buffer mapping
+ * matching the supplied MVE address.
+ * @param session The session.
+ * @param mve_addr The address in MVE virtual address space where the buffer is mapped.
+ * @return The buffer descriptor for the matching buffer if one exists, NULL otherwise.
+ */
+static struct mve_buffer_client *get_external_descriptor_by_mve_address(struct mve_session *session,
+ mve_addr_t mve_addr)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, register_list);
+
+ if (mve_addr == ptr->buffer->mapping.region.start + ptr->buffer->mapping.offset_in_page)
+ {
+ return ptr;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Enqueues a message in the input queue of the supplied session. This function
+ * assumes that the session is valid and the session mutex has been locked.
+ * @param session The session that wants to enqueue a message.
+ * @param code MVE message code (see the protocol definition).
+ * @param size Size in bytes of the data attached to the message.
+ * @param data Pointer to the attached data.
+ * @return MVE_BASE_ERROR_NONE on success or a suitable OMX error code on failure.
+ */
+static mve_base_error enqueue_message(struct mve_session *session,
+ uint32_t code,
+ uint32_t size,
+ void *data)
+{
+ mve_base_error ret;
+
+ WARN_ON(code > MVE_MESSAGE_CODE_JOB);
+ WARN_ON(size > MVE_MAX_MESSAGE_SIZE_IN_WORDS * sizeof(uint32_t));
+ WARN_ON(size > 0 && NULL == data);
+
+ ret = mve_com_add_message(session, code, size, data);
+
+ return ret;
+}
+
+/**
+ * Called when the state of the HW has changed (MVE_BASE_HW_STATE_STOPPED <-> MVE_BASE_HW_STATE_RUNNING). Note that
+ * this function assumes that the supplied session is valid.
+ * @param session The session which has changed state.
+ * @param state The new state of the HW.
+ */
+static void change_session_hw_state(struct mve_session *session, enum mve_base_hw_state state)
+{
+ WARN_ON(MVE_BASE_HW_STATE_STOPPED != state && MVE_BASE_HW_STATE_RUNNING != state);
+
+ session->state.hw_state = state;
+ session->state.pending_hw_state = state;
+}
+
+static void send_event_to_userspace(struct mve_session *session, struct mve_base_event_header *event)
+{
+ mve_queue_add_event(session->mve_events, event);
+#ifdef EMULATOR
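+    /* Editorial note: in emulator builds the session's file pointer appears to
+     * carry a file descriptor in its low 32 bits; the dummy write() below wakes
+     * the userspace side waiting on that descriptor. */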
+ {
+ int t = (int)(((uintptr_t)session->filep) & 0xFFFFFFFF);
+ write(t, &t, sizeof(int));
+ }
+#endif
+}
+
+/**
+ * Return a pending buffer to userspace.
+ * @param session Pointer to session.
+ * @param request Pointer to the pending buffer request.
+ * @param empty_this_buffer True if the buffer was enqueued on the input port, false otherwise.
+ */
+static void return_pending_buffer_to_userspace(struct mve_session *session, struct session_enqueue_request *request, bool empty_this_buffer)
+{
+ struct mve_base_event_header *event;
+ struct mve_base_buffer_userspace *buffer;
+ enum mve_base_event_code event_code;
+
+ event_code = (false != empty_this_buffer) ? MVE_BASE_EVENT_INPUT : MVE_BASE_EVENT_OUTPUT;
+ event = mve_queue_create_event(event_code, sizeof(struct mve_base_buffer_userspace));
+ if (NULL == event)
+ {
+ return;
+ }
+
+ buffer = (struct mve_base_buffer_userspace *)event->data;
+ memset(buffer, 0, sizeof(*buffer));
+ buffer->handle = request->param.handle;
+ buffer->flags = request->param.flags;
+ buffer->mve_flags = request->param.mve_flags;
+
+ send_event_to_userspace(session, event);
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+}
+
+/**
+ * Return all pending buffers to userspace.
+ * @param session Pointer to session.
+ * @param list Pointer to list with pending buffers.
+ * @param empty_this_buffer True if the buffers were enqueued on the input port, false otherwise.
+ */
+static void return_all_pending_buffers_to_userspace(struct mve_session *session, struct list_head *list, bool empty_this_buffer)
+{
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, list)
+ {
+ struct session_enqueue_request *request = container_of(pos, struct session_enqueue_request, list);
+
+ return_pending_buffer_to_userspace(session, request, empty_this_buffer);
+ list_del(&request->list);
+ MVE_RSRC_MEM_FREE(request);
+ }
+}
+
+/**
+ * Enqueue a job message to firmware message queue.
+ * @param session The session to add the message to.
+ * @return MVE_BASE_ERROR_NONE on success or a suitable OMX error code on failure.
+ */
+static mve_base_error enqueue_job(struct mve_session *session)
+{
+ struct mve_job_command cmd;
+ mve_base_error err;
+
+ cmd.cores = mver_scheduler_get_ncores();
+ cmd.frames = (num_sessions > 1) ? MULTI_SESSION_FRAME_COUNT : 0;
+ cmd.flags = 0;
+
+ err = enqueue_message(session, MVE_MESSAGE_CODE_JOB, sizeof(cmd), &cmd);
+
+ /* Remember the job size since this will be used to determine whether the
+ * session must be rescheduled or not in mve_session_has_work_callback */
+ session->state.job_frame_size = cmd.frames;
+
+ return err;
+}
+
+/**
+ * Enqueue pending buffers to firmware queue.
+ * @param session Pointer to session.
+ * @param list Pointer to list with pending buffers.
+ * @param empty_this_buffer True for 'empty this buffer' requests, false for 'fill this buffer' requests.
+ */
+static void enqueue_pending_buffers(struct mve_session *session, struct list_head *list, bool empty_this_buffer)
+{
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, list)
+ {
+ mve_base_error ret;
+ struct session_enqueue_request *request = container_of(pos, struct session_enqueue_request, list);
+
+ ret = mve_session_buffer_enqueue_internal(session, &request->param, empty_this_buffer);
+
+        /* If the request timed out, the firmware queue is full and we need to retry later. */
+ if (MVE_BASE_ERROR_TIMEOUT == ret)
+ {
+ break;
+ }
+ else if (MVE_BASE_ERROR_NONE != ret)
+ {
+ /* On failure return the buffer to userspace. */
+ return_pending_buffer_to_userspace(session, request, empty_this_buffer);
+ }
+
+ list_del(&request->list);
+ MVE_RSRC_MEM_FREE(request);
+ }
+}
+
+/**
+ * Notify the scheduler that this session wants to be scheduled for execution.
+ * Note that the session mutex must not be taken when calling this function.
+ * @param session The session to schedule.
+ * @return True if the session was scheduled, false if the session was put in
+ * the list of sessions to schedule in the future or if it cannot be
+ * scheduled at all (e.g. no firmware loaded).
+ */
+static bool schedule_session(struct mve_session *session)
+{
+ bool ret = true;
+
+ if (false == session->fw_loaded)
+ {
+ /* Don't schedule a session that hasn't loaded a firmware */
+ return false;
+ }
+
+ if (STATE_SWITCHED_OUT == session->state.state)
+ {
+ ret = mver_scheduler_execute((mver_session_id)session);
+ }
+
+ if (STATE_SWITCHING_OUT == session->state.state ||
+ STATE_SWITCHED_OUT == session->state.state)
+ {
+ /* Failed to schedule session */
+ ret = false;
+ }
+
+ return ret;
+}
+
+/**
+ * This function is used to query whether a switched in session has any remaining
+ * work to do.
+ * @param session The session to check
+ * @return true if the session has work to do, false otherwise
+ */
+static bool fw_has_work(struct mve_session *session)
+{
+ /* The session has work to do if it's waiting for response from the FW or the FW
+ * has not signalled idleness */
+ return (session->pending_response_count != 0) || (IDLE_STATE_IDLE != session->state.idle_state);
+}
+
+/**
+ * Helper function to request a session to switch out. A switch out message
+ * will be sent to the FW asking it to switch out as soon as possible.
+ */
+static void switchout_session(struct mve_session *session)
+{
+ if (STATE_SWITCHING_OUT != session->state.state &&
+ STATE_SWITCHED_OUT != session->state.state &&
+ STATE_WAITING_FOR_RESPONSE != session->state.state &&
+ STATE_SWITCHING_IN != session->state.state)
+ {
+        /* Poll here in case the FW needs time to handle the previous
+         * message and reach a state where it can be switched out */
+ int i;
+ bool ret;
+
+ for (i = 0; i < 20; i++)
+ {
+ ret = change_session_state(session, STATE_SWITCHING_OUT);
+ if (false != ret)
+ {
+ break;
+ }
+
+ /* Failed to request state transition, so sleep and retry. */
+ unlock_session(session);
+ msleep(10);
+
+            /* Lock the session again after the sleep. */
+ lock_session(session);
+ }
+
+ if (false != ret)
+ {
+ uint32_t param = 0;
+ bool need_to_reschedule;
+
+ enqueue_message(session, MVE_MESSAGE_CODE_SWITCH, sizeof(param), &param);
+ mver_irq_signal_mve((mver_session_id)session);
+
+ /* Check if the session needs to be rescheduled */
+ need_to_reschedule = fw_has_work(session);
+ if (need_to_reschedule != false)
+ {
+ unlock_session(session);
+ schedule_session(session);
+ lock_session(session);
+ }
+ }
+ }
+ else if (STATE_SWITCHING_IN == session->state.state ||
+ STATE_WAITING_FOR_RESPONSE == session->state.state)
+ {
+ /* This session is in a state that doesn't allow direct switch out.
+ * Request that the session is switched out as soon as possible */
+ session->state.request_switchout = true;
+ }
+}
+
+/**
+ * This function is called by the IRQ worker thread for all messages that
+ * are not responses to get/set parameter/config. The purpose of this function
+ * is to inspect each event and change the driver state if needed. Before this
+ * function returns, the event is added to the mve_events queue and made visible
+ * to user-space.
+ * @param session The session.
+ * @param event The event received.
+ */
+static void process_generic_event(struct mve_session *session, struct mve_base_event_header *event)
+{
+ bool delete_event = false;
+
+ switch (event->code)
+ {
+ case MVE_BASE_EVENT_OUTPUT: /* Intentional fall-through! */
+ case MVE_BASE_EVENT_INPUT:
+ {
+ mve_com_buffer buffer;
+ struct mve_buffer_client *buffer_client;
+
+ /* Mark buffer as invalid */
+ buffer.frame.nHandle = 0;
+
+ if (STATE_SWITCHED_OUT == session->state.state ||
+ STATE_SWITCHING_IN == session->state.state)
+ {
+ response_received(session);
+ }
+
+ if (MVE_BASE_EVENT_OUTPUT == event->code)
+ {
+ mve_com_get_output_buffer(session, &buffer);
+ }
+ else
+ {
+ mve_com_get_input_buffer(session, &buffer);
+ }
+
+ /* nHandle is the first member of each buffer type so it is safe to
+ * access this member from any of the buffer types. */
+ buffer_client = get_external_descriptor_by_mve_handle(session,
+ buffer.frame.nHandle);
+ if (NULL != buffer_client)
+ {
+ if ((session->state.quick_flush_target & MVE_BASE_FLUSH_OUTPUT_PORT &&
+ buffer_client->port_index == MVE_PORT_INDEX_OUTPUT) ||
+ (session->state.quick_flush_target & MVE_BASE_FLUSH_INPUT_PORT &&
+ buffer_client->port_index == MVE_PORT_INDEX_INPUT))
+ {
+ /* In quick flush state, save the returned buffers in the
+ * quick_flush_buffers linked-list. When all buffers have been
+ * flushed, enqueue them again. */
+ list_add_tail(&buffer_client->quick_flush_list, &session->quick_flush_buffers);
+ buffer_client->mve_handle = MVE_HANDLE_INVALID;
+ /* Do not pass MVE_BASE_EVENT_OUTPUT/INPUT to userspace when
+ * performing a quick flush. */
+ return;
+ }
+ else
+ {
+ struct mve_base_event_header *usr_event;
+ struct mve_base_buffer_userspace *buf;
+ mve_base_error res;
+
+ res = mve_session_buffer_dequeue_internal(session, buffer_client->buffer->info.buffer_id, &buffer);
+ if (MVE_BASE_ERROR_NONE != res)
+ {
+ return;
+ }
+
+ if (MVE_BASE_EVENT_OUTPUT == event->code &&
+ OMX_BUFFERFLAG_EOS == (buffer_client->flags & OMX_BUFFERFLAG_EOS))
+ {
+ session->state.eos_queued = false;
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session, "Resetting eos_queued.");
+ }
+
+ usr_event = mve_queue_create_event(event->code, sizeof(struct mve_base_buffer_userspace));
+ if (NULL == usr_event)
+ {
+ return;
+ }
+
+ buf = (struct mve_base_buffer_userspace *)usr_event->data;
+ mve_session_buffer_convert_to_userspace(buf, buffer_client);
+
+ event = usr_event;
+ delete_event = true;
+ }
+ }
+ else
+ {
+ WARN_ON(true);
+ /* This buffer has already been sent to userspace. Don't send it again */
+ return;
+ }
+
+ /* Try to enqueue any pending buffer requests. */
+ if (MVE_BASE_EVENT_INPUT == event->code)
+ {
+ enqueue_pending_buffers(session, &session->queued_input_buffers, true);
+ }
+ else
+ {
+ enqueue_pending_buffers(session, &session->queued_output_buffers, false);
+ }
+
+ break;
+ }
+ case MVE_BASE_EVENT_INPUT_FLUSHED:
+ case MVE_BASE_EVENT_OUTPUT_FLUSHED:
+ {
+ if (0 != session->state.quick_flush_target)
+ {
+ if (MVE_BASE_EVENT_INPUT_FLUSHED == event->code)
+ {
+ session->state.quick_flush_state |= MVE_BASE_FLUSH_INPUT_PORT;
+ }
+ else if (MVE_BASE_EVENT_OUTPUT_FLUSHED == event->code)
+ {
+ session->state.quick_flush_state |= MVE_BASE_FLUSH_OUTPUT_PORT;
+ }
+
+ if ((session->state.quick_flush_target & MVE_BASE_FLUSH_ALL_PORTS) ==
+ session->state.quick_flush_state)
+ {
+ /* All buffers have been flushed, re-enqueue them */
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, &session->quick_flush_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, quick_flush_list);
+ struct mve_base_buffer_details param;
+ mve_base_error ret;
+ uint32_t mve_flags = ptr->mve_flags;
+ uint32_t crc_offset = 0;
+
+ if (0 != (session->state.quick_flush_target & MVE_BASE_FLUSH_QUICK_SET_INTERLACE))
+ {
+ mve_flags |= MVE_FLAGS_INTERLACE;
+ }
+ else
+ {
+ mve_flags &= ~MVE_FLAGS_INTERLACE;
+ }
+
+ if (NULL != ptr->crc)
+ {
+ crc_offset = ptr->crc->info.offset;
+ }
+
+ ptr->in_use -= 1;
+ WARN_ON(0 > ptr->in_use);
+
+ memset(&param, 0, sizeof(param));
+ param.handle = ptr->buffer->info.handle;
+ param.buffer_id = ptr->buffer->info.buffer_id;
+ param.filled_len = ptr->filled_len;
+ param.flags = ptr->flags;
+ param.mve_flags = mve_flags;
+ param.crc_offset = crc_offset;
+ param.timestamp = 0;
+ ret = mve_session_buffer_enqueue_internal(session,
+ &param,
+ ptr->port_index == 0 ? true : false);
+ WARN_ON(MVE_BASE_ERROR_NONE != ret);
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+                        /* Reset the idleness detection and request that this session is
+                         * switched in again if a switch out has already started.
+                         * This condition is placed inside the loop because the quick_flush_buffers
+                         * list could be empty, in which case no buffers would be enqueued. */
+ session->state.idle_state = IDLE_STATE_ACTIVE;
+ session->state.request_switchin = true;
+ }
+
+ list_del(&ptr->quick_flush_list);
+ }
+
+ session->state.quick_flush_state = 0;
+ session->state.quick_flush_target = 0;
+
+ /* The session can be switching out at this point as the result
+ * of idleness. Don't attempt to change state if this is true. */
+ if (STATE_SWITCHING_OUT != session->state.state)
+ {
+ response_received(session);
+ }
+ }
+ /* Do not pass this event on to userspace */
+ return;
+ }
+ else
+ {
+ /* Mark all corresponding buffers as no longer in use by the MVE */
+ struct list_head *pos;
+ int port_index = 0;
+
+ if (MVE_BASE_EVENT_INPUT_FLUSHED == event->code)
+ {
+ port_index = 0;
+ }
+ else
+ {
+ port_index = 1;
+ }
+
+ list_for_each(pos, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, register_list);
+
+ if (port_index == ptr->port_index)
+ {
+ if (0 < ptr->in_use)
+ {
+ /* VIDDK-150: The FW has not returned all buffers. Enqueue another flush request */
+ uint32_t code = (1 == port_index) ? MVE_MESSAGE_CODE_OUTPUT_FLUSH :
+ MVE_MESSAGE_CODE_INPUT_FLUSH;
+ enqueue_message(session, code, 0, NULL);
+ mver_irq_signal_mve((mver_session_id)session);
+ return;
+ }
+ }
+ }
+ }
+ break;
+ }
+ case MVE_BASE_EVENT_STATE_CHANGED:
+ {
+ change_session_hw_state(session, *((enum mve_base_hw_state *)event->data));
+ if (STATE_SWITCHING_OUT != session->state.state)
+ {
+ response_received(session);
+ }
+ break;
+ }
+ case MVE_BASE_EVENT_SWITCHED_IN:
+ {
+ /* Don't change state from STATE_WAITING_FOR_RESPONSE! */
+ if (STATE_WAITING_FOR_RESPONSE != session->state.state)
+ {
+ response_received(session);
+ }
+
+            /* In case the FW went to sleep while we are waiting for a response */
+ if (session->pending_response_count != 0)
+ {
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ break;
+ }
+ case MVE_BASE_EVENT_SWITCHED_OUT:
+ {
+ enum MVE_SESSION_STATE state = session->state.state;
+
+ (void)change_session_state(session, STATE_SWITCHED_OUT);
+ session->state.request_switchout = false;
+
+ if (STATE_WAITING_FOR_RESPONSE == state)
+ {
+ session->state.request_switchin = true;
+ }
+ break;
+ }
+ case MVE_BASE_EVENT_PONG:
+ {
+ if (STATE_SWITCHED_OUT == session->state.state ||
+ STATE_SWITCHING_IN == session->state.state)
+ {
+ response_received(session);
+ }
+#if (defined UNIT) && !(defined DISABLE_WATCHDOG)
+ if (true == watchdog_ignore_pong)
+ {
+ break;
+ }
+#endif
+ session->state.ping_count = 0;
+ session->state.watchdog_state = WATCHDOG_PONGED;
+ break;
+ }
+ case MVE_BASE_EVENT_JOB_DEQUEUED:
+ /* Do not pass this event to userspace */
+ if (STATE_SWITCHED_OUT == session->state.state ||
+ STATE_SWITCHING_IN == session->state.state)
+ {
+ response_received(session);
+ }
+ return;
+ case MVE_BASE_EVENT_GENERIC:
+ break;
+ case MVE_BASE_EVENT_PROCESSED:
+ if (STATE_SWITCHED_OUT == session->state.state ||
+ STATE_SWITCHING_IN == session->state.state)
+ {
+ response_received(session);
+ }
+ break;
+ case MVE_BASE_EVENT_IDLE:
+ if (IDLE_STATE_IDLE_PENDING == session->state.idle_state)
+ {
+ /* Session has now been verified to be idle */
+ session->state.idle_state = IDLE_STATE_IDLE;
+ session->state.request_switchin = false;
+ }
+ else if (IDLE_STATE_PENDING == session->state.idle_state)
+ {
+ if (0 == session->output_buffer_count || 0 == session->input_buffer_count)
+ {
+                /* Switch out the session on the 2nd idle message if either of the FW buffer queues is empty */
+ session->state.idle_state = IDLE_STATE_IDLE;
+ session->state.request_switchin = false;
+ }
+ else
+ {
+                /* Wait for the 3rd idle message before switching out the session (VIDDK-1206) */
+ session->state.idle_state = IDLE_STATE_IDLE_PENDING;
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ }
+ else
+ {
+ /* The first idle message doesn't mean the FW is idle. */
+ session->state.idle_state = IDLE_STATE_PENDING;
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ return;
+ /* By default, just pass the message on to user space */
+ case MVE_BASE_EVENT_ERROR:
+ {
+ struct event_error
+ {
+ uint32_t reason;
+ char message[80];
+ };
+ struct event_error *e = (struct event_error *)(event->data);
+ if (NULL != e && MVE_ERROR_UNSUPPORTED == e->reason)
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Unsupported firmware.");
+                /* If we are waiting for a firmware response but instead receive an error
+                 * event reporting an unsupported firmware, the wait on the response_events
+                 * queue should be interrupted and an error returned instead of waiting
+                 * for the watchdog to trigger.
+                 */
+ if (STATE_WAITING_FOR_RESPONSE == session->state.state)
+ {
+ mve_queue_interrupt(session->response_events);
+ }
+ }
+ else if (NULL != e && MVE_ERROR_WATCHDOG == e->reason && !strcmp(e->message, "Watchdog"))
+ {
+ struct mve_comm_area_host *host_area;
+ struct mve_comm_area_mve *mve_area;
+
+ host_area = mve_rsrc_dma_mem_map(session->msg_in_queue);
+ mve_area = mve_rsrc_dma_mem_map(session->msg_out_queue);
+
+ if (host_area != NULL && mve_area != NULL)
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "host_area wpos: %d mve_area rpos: %d.", host_area->in_wpos, mve_area->in_rpos);
+ }
+ mve_rsrc_dma_mem_unmap(session->msg_in_queue);
+ mve_rsrc_dma_mem_unmap(session->msg_out_queue);
+ }
+ break;
+ }
+ case MVE_BASE_EVENT_ALLOC_PARAMS:
+ case MVE_BASE_EVENT_SEQUENCE_PARAMS: /* Intentional fallthrough */
+ {
+ /* Just forward these messages to userspace */
+ break;
+ }
+ case MVE_BASE_EVENT_REF_FRAME_RELEASED:
+ {
+ struct mve_buffer_client *buffer_client;
+ struct mve_com_notify_release_ref_frame *ref_frame;
+
+ ref_frame = (struct mve_com_notify_release_ref_frame *)event->data;
+ buffer_client = get_external_descriptor_by_mve_address(session, ref_frame->mve_buffer_addr);
+ if (NULL != buffer_client)
+ {
+ /* Create a new event and send that to userspace. A new event must be created
+ * because the original event contains the 32-bit virtual address of the buffer
+ * in MVE address space. Userspace wants the buffer handle which is a 64-bit value. */
+ struct mve_base_event_header *usr_event;
+ usr_event = mve_queue_create_event(event->code, sizeof(mve_base_buffer_handle_t));
+ if (NULL == usr_event)
+ {
+ return;
+ }
+
+ *(mve_base_buffer_handle_t *)usr_event->data = buffer_client->buffer->info.buffer_id;
+ /* Swap the old event for the new one. Make sure the new event is deleted before
+ * returning. The old event will automatically be deleted by the caller */
+ event = usr_event;
+ delete_event = true;
+ }
+
+ break;
+ }
+ case MVE_BASE_EVENT_BUFFER_PARAM:
+ {
+ struct buffer_param
+ {
+ uint32_t type;
+ uint32_t arg;
+ };
+ struct buffer_param *p = (struct buffer_param *)event->data;
+ if (NULL != p && MVE_BASE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES == p->type)
+ {
+ session->pending_buffer_on_hold_count = p->arg;
+ /* Don't send these events to userspace */
+ return;
+ }
+ break;
+ }
+ case MVE_BASE_EVENT_FW_TRACE_BUFFERS:
+ {
+ /* Store information about the FW trace buffers */
+ session->fw_trace = *(struct mve_com_trace_buffers *)event->data;
+ /* Don't send this message to userspace */
+ return;
+ }
+ default:
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Illegal event code. code=%u.", event->code);
+ return;
+ }
+ }
+
+ /* Add the event to the event queue. Userspace clients may now retrieve the
+ * event. */
+ send_event_to_userspace(session, event);
+ if (false != delete_event)
+ {
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ }
+}
+
+/**
+ * Called by the interrupt handler when an interrupt is received to handle
+ * messages from the MVE. The messages are extracted and placed in different
+ * queues for later processing.
+ * @param session The session.
+ */
+static void handle_mve_message(struct mve_session *session)
+{
+ struct mve_msg_header header;
+ uint32_t *data;
+
+ data = mve_com_get_message(session, &header);
+ while (NULL != data)
+ {
+ enum mve_base_event_code event_code;
+ struct mve_base_event_header *event;
+ uint32_t size;
+
+ event_code = response_code_to_mve_event(header.code);
+ size = header.size;
+
+ event = mve_queue_create_event(event_code, size);
+ if (NULL != event)
+ {
+ memcpy(event->data, data, size);
+
+ if (MVE_BASE_EVENT_ERROR == event->code)
+ {
+ struct mve_error
+ {
+ uint32_t reason;
+ char msg[1];
+ };
+
+ struct mve_error *err = (struct mve_error *)event->data;
+ MVE_LOG_PRINT(&mve_rsrc_log_session, MVE_LOG_ERROR, "Firmware error. reason=%u, msg=%s.", err->reason, err->msg);
+ }
+
+#ifndef DISABLE_WATCHDOG
+#ifdef UNIT
+ if (true == watchdog_ignore_pong)
+ {}
+ else
+#endif
+ {
+ session->state.ping_count = 0;
+ session->state.watchdog_state = WATCHDOG_PONGED;
+ }
+#endif
+ switch (event_code)
+ {
+ case MVE_BASE_EVENT_GET_PARAMCONFIG:
+ case MVE_BASE_EVENT_SET_PARAMCONFIG:
+ WARN_ON(STATE_SWITCHED_OUT == session->state.state);
+ response_received(session);
+ mve_queue_add_event(session->response_events, event);
+#ifdef EMULATOR
+ {
+ int t = (int)(((uintptr_t)session->filep) & 0xFFFFFFFF);
+ write(t, &t, sizeof(int));
+ }
+#endif
+ break;
+ default:
+ process_generic_event(session, event);
+ break;
+ }
+
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ }
+ MVE_RSRC_MEM_CACHE_FREE(data, header.size);
+ data = mve_com_get_message(session, &header);
+ }
+
+ mve_rsrc_dma_mem_unmap(session->msg_in_queue);
+ mve_rsrc_dma_mem_unmap(session->msg_out_queue);
+}
+
+#ifndef EMULATOR
+
+mve_base_error mve_session_handle_rpc_mem_alloc(struct mve_session *session, int fd)
+{
+ struct mve_com_rpc rpc;
+ mve_base_error err;
+
+ bool ret = acquire_session(session);
+ if (false == ret)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ lock_session(session);
+
+ err = mve_com_get_rpc_message(session, &rpc);
+ if (MVE_BASE_ERROR_NONE == err)
+ {
+ uint32_t size, max_size;
+ uint32_t num_pages, max_pages;
+ uint32_t alignment;
+ phys_addr_t *pages = NULL;
+ uint32_t mve_addr = 0;
+ struct mve_buffer_info info;
+ struct mve_buffer_external *buffer = NULL;
+
+ if (fd < 0)
+ {
+ err = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto error;
+ }
+
+ size = rpc.params.mem_alloc.size;
+ max_size = rpc.params.mem_alloc.max_size;
+ alignment = rpc.params.mem_alloc.log2_alignment;
+
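+        /* Round the byte sizes up to whole MMU pages. Worked example (editorial;
+         * assumes 4 KiB pages): size = 6000 gives num_pages = (6000 + 4095) >> 12 = 2. */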
+ num_pages = (size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+ max_pages = (max_size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+
+ info.allocator = MVE_BASE_BUFFER_ALLOCATOR_DMABUF;
+ info.handle = fd;
+ info.size = num_pages * MVE_MMU_PAGE_SIZE;
+
+ buffer = mve_buffer_create_buffer_external(&info, 0);
+ if (NULL != buffer)
+ {
+ bool ret = mve_buffer_map_physical_pages(buffer);
+ if (false != ret && num_pages <= buffer->mapping.num_pages)
+ {
+ pages = buffer->mapping.pages;
+ }
+ }
+
+ if (NULL != pages)
+ {
+ /* Map the allocated pages and return the MVE address */
+ struct mve_mem_virt_region region;
+ bool ret;
+
+ if (MVE_BASE_MEMORY_REGION_PROTECTED == rpc.params.mem_alloc.region)
+ {
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_PROTECTED, &region);
+ }
+ else
+ {
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_OUT_BUF, &region);
+ }
+
+ ret = mve_mmu_map_pages_and_reserve(session->mmu_ctx,
+ pages,
+ &region,
+ num_pages,
+ max_pages,
+ 1 << alignment,
+ ATTRIB_SHARED_RW,
+ ACCESS_READ_WRITE,
+ false);
+ if (false == ret)
+ {
+ pages = NULL;
+ err = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+ else
+ {
+ /* Success! */
+ mve_addr = region.start;
+ }
+
+ mve_buffer_unmap_physical_pages(buffer);
+ mve_buffer_destroy_buffer_external(buffer);
+ buffer = NULL;
+ }
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session,
+ "RPC alloc requested. size=%d(%u), num_pages=%d, addr=0x%X - 0x%X, alignment=%d.",
+ size, max_size, num_pages, mve_addr, mve_addr + max_size, alignment);
+
+error:
+ rpc.size = sizeof(uint32_t);
+ rpc.params.data[0] = mve_addr;
+ }
+
+ rpc.state = MVE_RPC_STATE_RETURN;
+
+ mve_com_put_rpc_message(session, &rpc);
+
+ session->rpc_in_progress = false;
+ mver_irq_signal_mve((mver_session_id)session);
+
+ unlock_session(session);
+ release_session(session);
+
+ return err;
+}
+
+mve_base_error mve_session_handle_rpc_mem_resize(struct mve_session *session, int fd)
+{
+ struct mve_com_rpc rpc;
+ mve_base_error err;
+
+ bool ret = acquire_session(session);
+ if (false == ret)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ lock_session(session);
+
+ err = mve_com_get_rpc_message(session, &rpc);
+ if (MVE_BASE_ERROR_NONE == err)
+ {
+ uint32_t mve_addr = rpc.params.mem_resize.ve_pointer;
+ uint32_t new_size = rpc.params.mem_resize.new_size;
+ uint32_t num_pages = (new_size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+ uint32_t ret_addr = 0;
+ bool ret;
+ uint32_t mapped_pages, max_pages, num_new_pages;
+ struct mve_buffer_info info;
+ struct mve_buffer_external *buffer;
+
+ if (fd < 0)
+ {
+ err = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto error;
+ }
+
+ ret = mve_mmu_map_info(session->mmu_ctx,
+ mve_addr,
+ &mapped_pages,
+ &max_pages);
+ if (false == ret || num_pages > max_pages)
+ {
+ err = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto error;
+ }
+
+ num_new_pages = num_pages - mapped_pages;
+
+ info.allocator = MVE_BASE_BUFFER_ALLOCATOR_DMABUF;
+ info.handle = fd;
+ info.size = num_new_pages * MVE_MMU_PAGE_SIZE;
+
+ buffer = mve_buffer_create_buffer_external(&info, 0);
+ if (NULL != buffer)
+ {
+ ret = mve_buffer_map_physical_pages(buffer);
+ if (false != ret && num_new_pages <= buffer->mapping.num_pages)
+ {
+ ret = mve_mmu_map_resize(session->mmu_ctx,
+ buffer->mapping.pages,
+ mve_addr,
+ num_pages,
+ ATTRIB_SHARED_RW,
+ ACCESS_READ_WRITE,
+ true);
+
+ if (false != ret)
+ {
+ ret_addr = mve_addr;
+ }
+ mve_buffer_unmap_physical_pages(buffer);
+ }
+ mve_buffer_destroy_buffer_external(buffer);
+ buffer = NULL;
+ }
+error:
+ mver_scheduler_flush_tlb((mver_session_id)session);
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session, "RPC resize requested. new_size=%d, fd: %d, num_pages=%d, addr=0x%X ret_addr= %#x.", new_size, fd, num_pages, mve_addr, ret_addr);
+
+ rpc.size = sizeof(uint32_t);
+ rpc.params.data[0] = ret_addr;
+ }
+ rpc.state = MVE_RPC_STATE_RETURN;
+ mve_com_put_rpc_message(session, &rpc);
+
+ session->rpc_in_progress = false;
+ mver_irq_signal_mve((mver_session_id)session);
+
+ unlock_session(session);
+ release_session(session);
+
+ return err;
+}
+#endif /* EMULATOR */
+
+/**
+ * This function is invoked when an interrupt has been received. It takes care
+ * of all RPC requests the MVE can make.
+ * @param session The session that has received a RPC request.
+ */
+static void handle_rpc_request(struct mve_session *session)
+{
+ struct mve_com_rpc rpc;
+ mve_base_error err;
+
+ if (false != session->rpc_in_progress)
+ {
+ return;
+ }
+
+ err = mve_com_get_rpc_message(session, &rpc);
+
+ if (MVE_BASE_ERROR_NONE == err)
+ {
+ session->rpc_in_progress = true;
+ if (MVE_COM_RPC_FUNCTION_DEBUG_PRINTF != rpc.call_id)
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_INFO, session, "<- RPC %s", print_rpc_code(rpc.call_id));
+ }
+
+ switch (rpc.call_id)
+ {
+ case MVE_COM_RPC_FUNCTION_DEBUG_PRINTF:
+ {
+ struct mve_base_event_header *event;
+ uint16_t size;
+
+ /* Make sure the string is NULL terminated */
+ rpc.params.debug_print.string[NELEMS(rpc.params.debug_print.string) - 1] = '\0';
+ size = strlen(rpc.params.debug_print.string);
+
+                /* Strip the trailing newline. */
+ if ((size > 0) && rpc.params.debug_print.string[size - 1] == '\n')
+ {
+ rpc.params.debug_print.string[--size] = '\0';
+ }
+
+ /* Include end of string. */
+ size++;
+
+ event = mve_queue_create_event(MVE_BASE_EVENT_RPC_PRINT, size);
+ if (NULL == event)
+ {
+ return;
+ }
+
+ memcpy(event->data, rpc.params.debug_print.string, size);
+
+ mve_queue_add_event(session->mve_events, event);
+#ifdef EMULATOR
+ {
+ int t = (int)(((uintptr_t)session->filep) & 0xFFFFFFFF);
+ write(t, &t, sizeof(int));
+ }
+#endif
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ rpc.size = 0;
+ break;
+ }
+ case MVE_COM_RPC_FUNCTION_MEM_ALLOC:
+ {
+ uint32_t size, max_size;
+ uint32_t num_pages, max_pages;
+ uint32_t alignment;
+ phys_addr_t *pages;
+ uint32_t mve_addr = 0;
+ bool secure = mve_fw_secure(session->fw_inst);
+
+ size = rpc.params.mem_alloc.size;
+ max_size = rpc.params.mem_alloc.max_size;
+ alignment = rpc.params.mem_alloc.log2_alignment;
+
+ num_pages = (size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+ max_pages = (max_size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+
+#ifdef EMULATOR
+                /* Always allocate max pages as a workaround for emulator resize issue EGIL-1421 */
+ num_pages = (max_pages > num_pages) ? max_pages : num_pages;
+ secure = false;
+#endif
+ if (false != secure)
+ {
+ struct mve_base_event_header *event;
+ struct mve_base_rpc_memory *rpc_memory;
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session, "%s send event to userspace size: %d", __FUNCTION__, num_pages * MVE_MMU_PAGE_SIZE);
+
+ event = mve_queue_create_event(MVE_BASE_EVENT_RPC_MEM_ALLOC, sizeof(struct mve_base_rpc_memory));
+ if (NULL != event)
+ {
+ rpc_memory = (struct mve_base_rpc_memory *)event->data;
+ rpc_memory->region = rpc.params.mem_alloc.region;
+ rpc_memory->size = num_pages * MVE_MMU_PAGE_SIZE;
+ }
+ mve_queue_add_event(session->mve_events, event);
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ return;
+ }
+
+ pages = MVE_RSRC_MEM_ALLOC_PAGES(num_pages);
+ if (NULL != pages || 0 == num_pages)
+ {
+ /* Map the allocated pages and return the MVE address */
+ struct mve_mem_virt_region region;
+ bool ret;
+
+ if (MVE_BASE_MEMORY_REGION_PROTECTED == rpc.params.mem_alloc.region)
+ {
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_PROTECTED, &region);
+ }
+ else
+ {
+ mve_mem_virt_region_get(session->prot_version, VIRT_MEM_REGION_OUT_BUF, &region);
+ }
+
+ ret = mve_mmu_map_pages_and_reserve(session->mmu_ctx,
+ pages,
+ &region,
+ num_pages,
+ max_pages,
+ 1 << alignment,
+ ATTRIB_SHARED_RW,
+ ACCESS_READ_WRITE,
+ true);
+ if (false == ret)
+ {
+ MVE_RSRC_MEM_FREE_PAGES(pages, num_pages);
+ pages = NULL;
+ }
+ else
+ {
+ /* Success! */
+ mve_addr = region.start;
+ }
+ }
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_INFO, session,
+ "RPC alloc requested. size=%d, num_pages=%d, addr=0x%X - 0x%X, alignment=%d.",
+ size, num_pages, mve_addr, mve_addr + max_size, alignment);
+
+ rpc.size = sizeof(uint32_t);
+ rpc.params.data[0] = mve_addr;
+ break;
+ }
+ case MVE_COM_RPC_FUNCTION_MEM_RESIZE:
+ {
+ uint32_t mve_addr = rpc.params.mem_resize.ve_pointer;
+ uint32_t new_size = rpc.params.mem_resize.new_size;
+ uint32_t num_pages = (new_size + MVE_MMU_PAGE_SIZE - 1) >> MVE_MMU_PAGE_SHIFT;
+ uint32_t ret_addr = 0;
+ bool secure = mve_fw_secure(session->fw_inst);
+
+ if (false != secure)
+ {
+ struct mve_base_event_header *event;
+ uint32_t mapped_pages, max_pages;
+ bool ret = mve_mmu_map_info(session->mmu_ctx,
+ mve_addr,
+ &mapped_pages,
+ &max_pages);
+ if (false == ret || num_pages > max_pages)
+ {
+ err = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto resize_error;
+ }
+
+                /* Do not shrink the area for secure memory */
+ if (num_pages > mapped_pages)
+ {
+ struct mve_base_rpc_memory *rpc_memory;
+ uint32_t num_new_pages = num_pages - mapped_pages;
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session, "%s send event to userspace old: %d extra: %d",
+ __FUNCTION__, mapped_pages * MVE_MMU_PAGE_SIZE, num_new_pages * MVE_MMU_PAGE_SIZE);
+
+ event = mve_queue_create_event(MVE_BASE_EVENT_RPC_MEM_RESIZE, sizeof(struct mve_base_rpc_memory));
+ if (NULL != event)
+ {
+ rpc_memory = (struct mve_base_rpc_memory *)event->data;
+ rpc_memory->region = VIRT_MEM_REGION_PROTECTED == mve_mem_virt_region_type_get(session->prot_version, mve_addr) ?
+ MVE_BASE_MEMORY_REGION_PROTECTED : MVE_BASE_MEMORY_REGION_OUTBUF;
+ rpc_memory->size = num_new_pages * MVE_MMU_PAGE_SIZE;
+ }
+ mve_queue_add_event(session->mve_events, event);
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ return;
+ }
+ ret_addr = mve_addr;
+ }
+ else
+ {
+ bool ret = mve_mmu_map_resize(session->mmu_ctx,
+ NULL,
+ mve_addr,
+ num_pages,
+ ATTRIB_SHARED_RW,
+ ACCESS_READ_WRITE,
+ false);
+ if (true == ret)
+ {
+ ret_addr = mve_addr;
+ }
+ }
+
+resize_error:
+ mver_scheduler_flush_tlb((mver_session_id)session);
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_INFO, session, "RPC resize requested. new_size=%d, num_pages=%d, mve_addr=0x%X, ret_addr: 0x%X.", new_size, num_pages, mve_addr, ret_addr);
+
+ rpc.size = sizeof(uint32_t);
+ rpc.params.data[0] = ret_addr;
+ break;
+ }
+ case MVE_RPC_FUNCTION_MEM_FREE:
+ {
+ if (0 != rpc.params.mem_free.ve_pointer)
+ {
+ mve_mmu_unmap_pages(session->mmu_ctx, rpc.params.mem_free.ve_pointer);
+ }
+
+ mver_scheduler_flush_tlb((mver_session_id)session);
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_INFO, session, "RPC free requested. addr=0x%X.", rpc.params.mem_free.ve_pointer);
+
+ rpc.size = 0;
+ break;
+ }
+ default:
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_ERROR, session, "Unsupported RPC request. id=%u.", rpc.call_id);
+ rpc.size = 0;
+ }
+ }
+
+ switch (rpc.call_id)
+ {
+ case MVE_RPC_FUNCTION_DEBUG_PRINTF:
+ case MVE_RPC_FUNCTION_MEM_ALLOC:
+ case MVE_RPC_FUNCTION_MEM_RESIZE:
+ case MVE_RPC_FUNCTION_MEM_FREE:
+ {
+ rpc.state = MVE_RPC_STATE_RETURN;
+ break;
+ }
+ default:
+ {
+ rpc.state = MVE_RPC_STATE_FREE;
+ break;
+ }
+ }
+
+ mve_com_put_rpc_message(session, &rpc);
+ session->rpc_in_progress = false;
+
+ if (MVE_RPC_FUNCTION_DEBUG_PRINTF != rpc.call_id)
+ {
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ }
+}
+
+/**
+ * Callback function that is invoked when the CPU receives an interrupt
+ * from the MVE.
+ * @param session_id Session identifier of the session mapped to the LSID
+ * that generated the interrupt.
+ */
+void mve_session_irq_callback(mver_session_id session_id)
+{
+ struct mve_session *session;
+ bool ret;
+
+ session = (struct mve_session *)session_id;
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return;
+ }
+
+ lock_session(session);
+
+ session->state.irqs++;
+
+ handle_rpc_request(session);
+ handle_mve_message(session);
+
+ /* Update the timestamp marking the most recent interaction with the HW */
+ do_gettimeofday(&session->state.timestamp);
+
+ unlock_session(session);
+ release_session(session);
+}
+
+#ifndef DISABLE_WATCHDOG
+/**
+ * This function is called when the MVE has failed to respond to the ping
+ * requests. It informs userspace that the session has hung. The session
+ * is also removed from the scheduler. Please note that the session
+ * must be validated by the caller.
+ *
+ * @param session Pointer to session that has hung.
+ */
+static void mve_session_hung(struct mve_session *session)
+{
+ struct mve_base_event_header *event;
+ struct list_head *pos;
+
+ session->state.watchdog_state = WATCHDOG_TIMEOUT;
+
+    /* Notify userspace that the session has hung */
+ event = mve_queue_create_event(MVE_BASE_EVENT_SESSION_HUNG, 0);
+ if (NULL == event)
+ {
+ return;
+ }
+
+ mve_queue_add_event(session->mve_events, event);
+#ifdef EMULATOR
+ {
+ int t = (int)(((uintptr_t)session->filep) & 0xFFFFFFFF);
+ write(t, &t, sizeof(int));
+ }
+#endif
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+
+ /* Mark all input/output buffers as no longer in use by the MVE */
+ list_for_each(pos, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, register_list);
+
+ ptr->in_use = 0;
+ }
+
+ /* Stop and remove the session from the scheduler */
+ mver_scheduler_stop((mver_session_id)session);
+ mver_scheduler_unregister_session((mver_session_id)session);
+}
+
+/**
+ * Check whether the session is active for the ping command, which means that
+ * the corresponding firmware is loaded in the MVE and the session is
+ * in either the STATE_ACTIVE_JOB or the STATE_WAITING_FOR_RESPONSE
+ * state.
+ *
+ * @param session Pointer to session to be checked.
+ * @return True if the session is active, false otherwise.
+ */
+static bool is_session_active(struct mve_session *session)
+{
+ bool ret = false;
+
+ if (STATE_ACTIVE_JOB == session->state.state ||
+ STATE_WAITING_FOR_RESPONSE == session->state.state)
+ {
+ ret = true;
+ }
+
+ return ret;
+}
+
+/**
+ * Ping the FW. The watchdog state gets set after the ping command has been
+ * enqueued. The session must be validated by the caller.
+ *
+ * @param session Pointer to session to be pinged.
+ */
+static void mve_session_ping(struct mve_session *session)
+{
+ enqueue_message(session, MVE_MESSAGE_CODE_PING, 0, NULL);
+ mver_irq_signal_mve((mver_session_id)session);
+
+ session->state.watchdog_state = WATCHDOG_PINGED;
+ session->state.ping_count++;
+}
+
+/**
+ * Session watchdog task that checks the response from the
+ * previous ping and proceeds accordingly.
+ *
+ * @param session Pointer to watchdog session.
+ * @param tv Current time.
+ */
+static void session_watchdog(struct mve_session *session, struct timeval *tv)
+{
+ long dt;
+ bool is_valid, res;
+
+ is_valid = is_session_valid(session);
+ if (true == is_valid)
+ {
+ kref_get(&session->refcount);
+ }
+ else
+ {
+ return;
+ }
+
+ lock_session(session);
+
+ res = is_session_active(session);
+ if (false != res)
+ {
+ /* Calculate the time delta between the current time and the last
+ * HW interaction. */
+ dt = tv->tv_sec * 1000 + tv->tv_usec / 1000 -
+ (session->state.timestamp.tv_sec * 1000 + session->state.timestamp.tv_usec / 1000);
+ if (dt > WATCHDOG_PING_INTERVAL)
+ {
+ if (MAX_NUM_PINGS > session->state.ping_count)
+ {
+ mve_session_ping(session);
+ }
+ else
+ {
+ mve_session_hung(session);
+ }
+ }
+ }
+
+ unlock_session(session);
+ release_session(session);
+}
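+
+/* Escalation summary (derived from the code above): once more than
+ * WATCHDOG_PING_INTERVAL ms have passed since the last HW interaction, the
+ * session is pinged up to MAX_NUM_PINGS times; if the ping budget is
+ * exhausted without a response, mve_session_hung() marks the session as
+ * hung and removes it from the scheduler. */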
+
+/**
+ * Watchdog task that invokes session watchdog.
+ */
+static void watchdog_task(void)
+{
+ struct list_head *pos, *next;
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+
+ list_for_each_safe(pos, next, &sessions)
+ {
+ struct mve_session *session = container_of(pos, struct mve_session, list);
+ bool acquired;
+
+        /* If acquiring the session failed, then the session has been removed. This
+         * also means that the pointer to the next session in the session list can't
+         * be trusted, so the loop is aborted and retried at the next poll.
+ */
+ acquired = acquire_session(session);
+ if (acquired == false)
+ {
+ break;
+ }
+
+ session_watchdog(session, &tv);
+
+ release_session(session);
+ }
+}
+
+/**
+ * Watchdog thread that executes the watchdog task periodically.
+ * @param data Not used but required by the kthread API.
+ * @return Returns 0 when the thread exits.
+ */
+static int watchdog_thread_func(void *data)
+{
+ int ret;
+ (void)data;
+
+ do
+ {
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+#ifdef EMULATOR
+ (void)down_interruptible(&watchdog_sem);
+#endif
+#endif
+ watchdog_task();
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+#ifdef EMULATOR
+ up(&watchdog_sem);
+#endif
+#endif
+ ret = down_timeout(&watchdog.killswitch, msecs_to_jiffies(WATCHDOG_PING_INTERVAL));
+ }
+ while (ret != 0);
+
+ return 0;
+}
+
+/**
+ * Initialize the watchdog system.
+ */
+static void watchdog_init(void)
+{
+ atomic_set(&watchdog.refcount, 0);
+}
+
+/**
+ * Create the watchdog task.
+ */
+static void watchdog_create_session(void)
+{
+ int cnt;
+
+ cnt = atomic_add_return(1, &watchdog.refcount);
+ if (1 == cnt)
+ {
+ /* Create the watchdog thread */
+ char name[] = "mv500-watchdog";
+
+ sema_init(&watchdog.killswitch, 0);
+ watchdog.watchdog_thread = kthread_run(watchdog_thread_func, NULL, name);
+ }
+}
+
+/**
+ * Terminate the watchdog task.
+ */
+static void watchdog_destroy_session(void)
+{
+ int cnt;
+
+ cnt = atomic_sub_return(1, &watchdog.refcount);
+ if (0 == cnt)
+ {
+ up(&watchdog.killswitch);
+ kthread_stop(watchdog.watchdog_thread);
+ }
+}
+#endif /* #ifndef DISABLE_WATCHDOG */
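+
+/* Illustrative pairing (sketch, not part of the driver): the refcounted
+ * watchdog lifetime follows session creation and destruction. The first
+ * session spawns the mv500-watchdog thread; the last one stops it.
+ *
+ *   mve_session_create();  // internally: watchdog_create_session(), refcount 0->1, kthread_run()
+ *   mve_session_create();  // internally: watchdog_create_session(), refcount 1->2, no-op
+ *   mve_session_destroy(); // internally: watchdog_destroy_session(), refcount 2->1, no-op
+ *   mve_session_destroy(); // internally: watchdog_destroy_session(), refcount 1->0, kthread_stop()
+ */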
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+#include <linux/device.h>
+
+static ssize_t sysfs_print_sessions(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ struct mve_session *session = NULL;
+ struct list_head *pos;
+ int i = 0;
+
+ num += snprintf(buf, PAGE_SIZE, "Sessions:\n");
+ list_for_each(pos, &sessions)
+ {
+ uint32_t num_data;
+
+ session = container_of(pos, struct mve_session, list);
+
+ num += snprintf(buf + num, PAGE_SIZE - num, "%d: %p\n", i, session);
+ num += snprintf(buf + num, PAGE_SIZE - num, " role: %s\n", session->role);
+
+ num_data = session->mve_events->wpos - session->mve_events->rpos;
+ num += snprintf(buf + num, PAGE_SIZE - num, " regular events: %d\n",
+ num_data);
+
+ /* Print event codes */
+ if (0 < num_data)
+ {
+ uint32_t p;
+
+ num += snprintf(buf + num, PAGE_SIZE - num, " ");
+ p = session->mve_events->rpos;
+ while (0 < session->mve_events->wpos - p)
+ {
+ uint32_t message_size = session->mve_events->buffer[p % session->mve_events->buffer_size] >> 16;
+ num += snprintf(buf + num, PAGE_SIZE - num, "(%d, %d) ",
+ session->mve_events->buffer[p % session->mve_events->buffer_size] & 0xFFFF,
+ message_size);
+ p += 1 + (message_size + 3) / 4;
+ }
+ num += snprintf(buf + num, PAGE_SIZE - num, "\n");
+ }
+
+ num += snprintf(buf + num, PAGE_SIZE - num, " query events: %d\n",
+ session->response_events->wpos - session->response_events->rpos);
+
+ num += snprintf(buf + num, PAGE_SIZE - num, " state: %d\n", session->state.state);
+ num += snprintf(buf + num, PAGE_SIZE - num, " HW state: %d\n", session->state.hw_state);
+
+ /* Print statistics on registered buffers */
+ num += snprintf(buf + num, PAGE_SIZE - num, " Buffers\n");
+ {
+ struct list_head *b;
+
+ list_for_each(b, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(b, struct mve_buffer_client, register_list);
+
+ num += snprintf(buf + num, PAGE_SIZE - num, "\t\t%s: %llu va:0x%X - 0x%X owner: %s\n", ptr->port_index == 0 ? "IN" : "OUT",
+ ptr->buffer->info.handle, ptr->buffer->mapping.region.start, ptr->buffer->mapping.region.end,
+ ptr->in_use > 0 ? "MVE" : "HOST");
+ }
+ }
+ i++;
+ }
+
+ return num;
+}
+
+static ssize_t sysfs_dump_fw_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ struct mve_session *session = NULL;
+ struct list_head *pos;
+
+ num += snprintf(buf, PAGE_SIZE, "Requesting FW status dump\n");
+ list_for_each(pos, &sessions)
+ {
+ session = container_of(pos, struct mve_session, list);
+
+ /* Dump FW status for the currently running session */
+ if (STATE_WAITING_FOR_RESPONSE == session->state.state ||
+ STATE_ACTIVE_JOB == session->state.state)
+ {
+ enqueue_message(session, MVE_MESSAGE_CODE_DUMP, 0, NULL);
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ }
+
+ return num;
+}
+
+#if (1 == DVFS_DEBUG_MODE)
+static ssize_t sysfs_print_dvfs_b_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ num += snprintf(buf, PAGE_SIZE, "%u", atomic_read(&dvfs_b_enable) ? 1 : 0);
+ return num;
+}
+
+static ssize_t sysfs_set_dvfs_b_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int failed;
+    unsigned int enable; /* kstrtouint() expects an unsigned int */
+ failed = kstrtouint(buf, 10, &enable);
+ if (!failed)
+ {
+ atomic_set(&dvfs_b_enable, enable);
+ }
+ return (failed) ? failed : count;
+}
+#endif /* DVFS_DEBUG_MODE */
+
+static struct device_attribute sysfs_files[] =
+{
+ __ATTR(sessions, S_IRUGO, sysfs_print_sessions, NULL),
+ __ATTR(dump_fw_status, S_IRUGO, sysfs_dump_fw_status, NULL),
+#if (1 == DVFS_DEBUG_MODE)
+ __ATTR(dvfs_b_adjust, (S_IRUGO | S_IWUSR), sysfs_print_dvfs_b_enable, sysfs_set_dvfs_b_enable),
+#endif
+};
+
+#endif /* CONFIG_SYSFS && _DEBUG */
+
+static void session_destroy_instance(struct mve_session *session)
+{
+ struct list_head *pos, *next;
+
+ mver_scheduler_stop((mver_session_id)session);
+ mver_scheduler_unregister_session((mver_session_id)session);
+
+ list_for_each_safe(pos, next, &session->queued_input_buffers)
+ {
+ struct session_enqueue_request *request = container_of(pos, struct session_enqueue_request, list);
+ list_del(&request->list);
+ MVE_RSRC_MEM_FREE(request);
+ }
+
+ list_for_each_safe(pos, next, &session->queued_output_buffers)
+ {
+ struct session_enqueue_request *request = container_of(pos, struct session_enqueue_request, list);
+ list_del(&request->list);
+ MVE_RSRC_MEM_FREE(request);
+ }
+
+ mve_com_delete(session);
+
+ mver_pm_request_suspend();
+
+#ifndef DISABLE_WATCHDOG
+ watchdog_destroy_session();
+#endif
+
+ num_sessions--;
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_INFO, session, "Session destroyed.");
+
+ kref_put(&session->refcount, free_session);
+}
+
+/**
+ * Callback function executed by the scheduler when it wants to know whether
+ * the session has any work to perform or not.
+ *
+ * @param session_id ID of the session to query the scheduling state.
+ * @return The session's scheduling state.
+ */
+static enum SCHEDULE_STATE mve_session_has_work_callback(mver_session_id session_id)
+{
+ struct mve_session *session;
+ enum SCHEDULE_STATE state = SCHEDULE_STATE_BUSY;
+ bool ret;
+
+ session = (struct mve_session *)session_id;
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return SCHEDULE_STATE_SLEEP;
+ }
+
+ if (STATE_SWITCHED_OUT == session->state.state)
+ {
+ /* Session just got switched out. Find out if it needs to be rescheduled */
+ if (session->pending_response_count != 0 ||
+ false != session->state.request_switchin ||
+ session->state.hw_state != session->state.pending_hw_state ||
+ (IDLE_STATE_IDLE != session->state.idle_state && 0 != session->state.job_frame_size))
+ {
+ state = SCHEDULE_STATE_RESCHEDULE;
+ }
+ /* Session has received two idle messages and no buffers or messages have been added. Do not
+ * reschedule! */
+ else if (IDLE_STATE_IDLE == session->state.idle_state)
+ {
+ state = SCHEDULE_STATE_SLEEP;
+ }
+ }
+ else
+ {
+ bool has_work = fw_has_work(session);
+ if (false != session->state.request_switchout)
+ {
+ state = SCHEDULE_STATE_REQUEST_SWITCHOUT;
+ }
+ else if (false != has_work)
+ {
+ state = SCHEDULE_STATE_BUSY;
+ }
+ else
+ {
+ state = SCHEDULE_STATE_IDLE;
+ }
+ }
+
+ release_session(session);
+
+ return state;
+}
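+
+/* Decision summary (derived from the code above):
+ *
+ *   switched out + pending responses, a requested switch-in, a pending HW
+ *                  state change, or an unfinished job -> RESCHEDULE
+ *   switched out + fully idle                         -> SLEEP
+ *   switched out + otherwise                          -> BUSY (default)
+ *   switched in  + switchout requested                -> REQUEST_SWITCHOUT
+ *   switched in  + fw_has_work()                      -> BUSY
+ *   switched in  + otherwise                          -> IDLE
+ */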
+
+/**
+ * Callback function executed by the scheduler when the session should be
+ * switched out.
+ *
+ * @param session_id ID of the session to switch out.
+ * @param require_idleness True if switchout should occur only if the session is idle.
+ */
+static void mve_session_switchout_callback(mver_session_id session_id, bool require_idleness)
+{
+ struct mve_session *session;
+ bool ret;
+
+ session = (struct mve_session *)session_id;
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return;
+ }
+
+ lock_session(session);
+ /* The idle status can change between the scheduler calling
+ * mve_session_has_work_callback and this function because the session lock
+ * is not kept during that time. The if statement below is used to protect
+ * the session from being switched out while it actually has work to do. */
+ if (false == require_idleness ||
+ (false != require_idleness &&
+ IDLE_STATE_IDLE == session->state.idle_state))
+ {
+ switchout_session(session);
+ }
+ unlock_session(session);
+ release_session(session);
+}
+
+/**
+ * Callback function invoked by the scheduler when the session is being
+ * switched in.
+ *
+ * @param session_id ID of the session to switch in.
+ */
+static void mve_session_switchin_callback(mver_session_id session_id)
+{
+ struct mve_session *session = (struct mve_session *)session_id;
+ bool ret;
+ mve_base_error err;
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return;
+ }
+
+ lock_session(session);
+
+ /* Reset idleness */
+ session->state.idle_state = IDLE_STATE_ACTIVE;
+ session->state.request_switchin = false;
+
+ err = enqueue_job(session);
+ if (MVE_BASE_ERROR_NONE != err)
+ {
+ goto out;
+ }
+
+    /* If the session has a pending state change, then add a stop/go message. It is
+     * important that this message is appended after the job message. */
+ if (session->state.pending_hw_state != MVE_BASE_HW_STATE_PENDING &&
+ session->state.hw_state != session->state.pending_hw_state)
+ {
+ uint32_t code = (session->state.pending_hw_state == MVE_BASE_HW_STATE_STOPPED) ? MVE_MESSAGE_CODE_STOP : MVE_MESSAGE_CODE_GO;
+
+ err = enqueue_message(session, code, 0, NULL);
+ if (MVE_BASE_ERROR_NONE != err)
+ {
+ goto out;
+ }
+ session->state.pending_hw_state = MVE_BASE_HW_STATE_PENDING;
+ }
+
+ if (STATE_SWITCHED_OUT == session->state.state)
+ {
+ /* Switching in session */
+ ret = change_session_state(session, STATE_SWITCHING_IN);
+ if (false != ret)
+ {
+ session->state.irqs = 0;
+ }
+ }
+ else
+ {
+ /* Switching in a session that is in the wrong state */
+ /* VIDDK-752: Remove this print */
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Attempting to switch in a session that is not switched out. Current state %d",
+ session->state.state);
+ WARN_ON(true);
+ }
+
+ mver_irq_signal_mve((mver_session_id)session_id);
+
+out:
+ unlock_session(session);
+ release_session(session);
+}
+
+/**
+ * Callback function executed by the scheduler when the session has been switched
+ * out completely. This session will not execute until mve_session_switchin_callback
+ * has been called.
+ * @param session_id ID of the session that has been switched out.
+ */
+static void mve_session_switchout_completed_callback(mver_session_id session_id)
+{
+ struct mve_session *session = (struct mve_session *)session_id;
+
+ bool ret;
+
+ /* Take care of pending IRQs not handled by the IRQ handler automatically */
+ mve_session_irq_callback(session_id);
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return;
+ }
+
+ lock_session(session);
+ ret = change_session_state(session, STATE_SWITCHED_OUT);
+ unlock_session(session);
+ release_session(session);
+}
+
+/**
+ * Callback function executed by the scheduler to retrieve the number of restricting
+ * buffers enqueued to the FW. Restricting buffers are output buffers for decoders
+ * and input buffers for encoders.
+ *
+ * The restricting buffer count is adjusted by the number of buffers that the
+ * FW has processed but not yet released. Because these two values are updated
+ * at different points in time, the return value could potentially be negative.
+ * In that case the caller should ignore the value and treat the call as failed.
+ *
+ * @param session_id ID of the session
+ * @return Zero or a positive number of restricting buffers on success,
+ *         or a negative value on failure.
+ */
+static int mve_session_get_restricting_buffer_count_callback(mver_session_id session_id)
+{
+ struct mve_session *session = (struct mve_session *)session_id;
+ int buffers_cnt;
+ bool acquired = acquire_session(session);
+ if (false == acquired)
+ {
+ return -1;
+ }
+
+ /* Don't have to lock the session since we just want to get the number of buffers */
+ if (MVE_SESSION_TYPE_DECODER == session->session_type)
+ {
+ buffers_cnt = session->output_buffer_count;
+#if (1 == DVFS_DEBUG_MODE)
+ if (1 == atomic_read(&dvfs_b_enable))
+#endif
+ {
+ buffers_cnt -= session->buffer_on_hold_count;
+ }
+ }
+ else
+ {
+ buffers_cnt = session->input_buffer_count;
+ if (false != session->state.eos_queued && buffers_cnt < 2)
+ {
+ buffers_cnt = 2;
+ }
+ }
+
+ if (false != session->keep_freq_high)
+ {
+ buffers_cnt += 2;
+ }
+
+ release_session(session);
+
+ return buffers_cnt;
+}
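+
+/* Illustrative caller-side handling (sketch only; the real consumer is the
+ * scheduler, which lives outside this file). A negative count means the
+ * query failed and the value must be ignored:
+ *
+ *   int cnt = mve_session_get_restricting_buffer_count_callback(id);
+ *   if (0 <= cnt)
+ *   {
+ *       adjust_dvfs(cnt); // adjust_dvfs is a hypothetical consumer of a valid count
+ *   }
+ */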
+
+/*****************************************************************************
+ * External interface
+ *****************************************************************************/
+#if defined(_DEBUG) && !defined(EMULATOR)
+static int fw_trace_open(struct inode *inode, struct file *file)
+{
+#define TRACE_SIZE (128 * 1024)
+ char *str;
+ struct list_head *pos, *next;
+ int num = 0;
+
+ str = MVE_RSRC_MEM_VALLOC(TRACE_SIZE);
+ if (NULL == str)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_session, MVE_LOG_ERROR, "Unable to allocate memory for FW trace buffer.");
+ return -EBUSY;
+ }
+ memset(str, 0, TRACE_SIZE);
+
+ list_for_each_safe(pos, next, &sessions)
+ {
+ struct mve_session *session = container_of(pos, struct mve_session, list);
+ int buffer_index = 0;
+ int core;
+
+ struct mve_com_trace_buffers *fw_trace = &session->fw_trace;
+
+ num += snprintf(str + num, TRACE_SIZE - num, "Session %p (%s):\n", session, session->role);
+
+ for (core = 0; core < fw_trace->num_cores; core++)
+ {
+ int rasc;
+
+ for (rasc = 0; rasc < 8 * sizeof(fw_trace->rasc_mask); rasc++)
+ {
+ if ((1 << rasc) & fw_trace->rasc_mask)
+ {
+ uint32_t rasc_addr = fw_trace->buffers[buffer_index].rasc_addr;
+
+ if (0 != rasc_addr)
+ {
+ uint32_t size = fw_trace->buffers[buffer_index].size;
+ uint32_t *buffer = (uint32_t *)MVE_RSRC_MEM_VALLOC(size);
+
+ if (NULL != buffer)
+ {
+ uint32_t i;
+
+ memset(buffer, 0, size);
+ mve_mmu_read_buffer(session->mmu_ctx, rasc_addr, (uint8_t *)buffer, size);
+
+ /* convert size to be in words from now on */
+ size >>= 2;
+ for (i = 0; i < size; )
+ {
+ uint32_t event = buffer[(i + 0) & (size - 1)] & 0xffff;
+ uint32_t param0 = (buffer[(i + 0) & (size - 1)] >> 16) & 0xff;
+ uint32_t nparams = buffer[(i + 0) & (size - 1)] >> 28;
+ uint32_t timestamp = buffer[(i + 1) & (size - 1)];
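+
+                                /* Worked example (illustrative, not from the trace spec):
+                                 * a header word of 0x20010123 decodes to event 0x0123,
+                                 * param0 0x01 and nparams 2; the record then occupies
+                                 * 2 + nparams - 1 = 3 words (header, timestamp and one
+                                 * extra parameter). */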
+
+ if (0 != event)
+ {
+ int j;
+
+ num += snprintf(str + num, TRACE_SIZE - num, "%d %d %08x %04x %02x",
+ core, rasc, timestamp, event, param0);
+
+ for (j = 0; j < ((int)nparams) - 1; j++)
+ {
+ uint32_t param = buffer[(i + 2 + j) & (size - 1)];
+ num += snprintf(str + num, TRACE_SIZE - num, " %08x", param);
+ }
+ num += snprintf(str + num, TRACE_SIZE - num, "\n");
+ }
+ i += (2 + nparams - 1);
+ }
+ MVE_RSRC_MEM_VFREE(buffer);
+ }
+ }
+
+ buffer_index++;
+ }
+ }
+ }
+ num += snprintf(str + num, TRACE_SIZE - num, "\n\n");
+ }
+
+ file->private_data = (void *)str;
+
+ return 0;
+#undef TRACE_SIZE
+}
+
+static int fw_trace_close(struct inode *inode, struct file *file)
+{
+ MVE_RSRC_MEM_VFREE(file->private_data);
+ return 0;
+}
+
+static ssize_t fw_trace_read(struct file *file, char __user *user_buffer, size_t count, loff_t *position)
+{
+ char *str = (char *)file->private_data;
+
+ return simple_read_from_buffer(user_buffer, count, position, str, strlen(str));
+}
+#endif
+
+void mve_session_init(struct device *dev)
+{
+ sema_init(&sessions_sem, 1);
+ num_sessions = 0;
+
+#ifndef DISABLE_WATCHDOG
+ /* Initialize watchdog thread */
+ watchdog_init();
+#endif
+
+#if defined(_DEBUG) && !defined(EMULATOR)
+ {
+ static const struct file_operations trace_fops =
+ {
+ .open = fw_trace_open,
+ .read = fw_trace_read,
+ .release = fw_trace_close
+ };
+ struct dentry *dentry, *parent_dentry;
+
+ parent_dentry = mve_rsrc_log_get_parent_dir();
+ dentry = debugfs_create_file("fw_trace", 0400, parent_dentry, NULL, &trace_fops);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_session, MVE_LOG_ERROR, "Unable to create debugfs file 'fw_trace'.");
+ }
+ }
+
+#if defined(CONFIG_SYSFS)
+ {
+ int i;
+
+ for (i = 0; i < NELEMS(sysfs_files); ++i)
+ {
+ int err = device_create_file(dev, &sysfs_files[i]);
+ if (err < 0)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_session, MVE_LOG_ERROR, "Unable to create sysfs file.");
+ }
+ }
+ }
+#endif /* CONFIG_SYSFS */
+#endif /* _DEBUG && !EMULATOR */
+}
+
+void mve_session_deinit(struct device *dev)
+{
+#ifndef EMULATOR
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ int i;
+ for (i = 0; i < NELEMS(sysfs_files); ++i)
+ {
+ device_remove_file(dev, &sysfs_files[i]);
+ }
+#endif
+#endif
+
+ mve_session_destroy_all_sessions();
+}
+
+void mve_session_destroy_all_sessions(void)
+{
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, &sessions)
+ {
+ struct mve_session *ptr = container_of(pos, struct mve_session, list);
+
+ list_del(&ptr->list);
+ session_destroy_instance(ptr);
+ }
+}
+
+struct mve_session *mve_session_create(struct file *filep)
+{
+ struct mve_session *session;
+ struct mve_mem_virt_region region;
+ bool ret;
+ int sem_taken;
+ phys_addr_t *pages;
+
+ session = MVE_RSRC_MEM_ZALLOC(sizeof(struct mve_session), GFP_KERNEL);
+ if (NULL == session)
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to allocate memory to create a new session.");
+ return NULL;
+ }
+
+ session->mmu_ctx = mve_mmu_create_ctx();
+ if (NULL == session->mmu_ctx)
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "Failed to create a MMU context struct.");
+ MVE_RSRC_MEM_FREE(session);
+ return NULL;
+ }
+
+ session->filep = filep;
+ session->fw_loaded = false;
+ session->keep_freq_high = true;
+
+ session->state.state = STATE_SWITCHED_OUT;
+ session->state.hw_state = MVE_BASE_HW_STATE_STOPPED;
+ session->state.pending_hw_state = session->state.hw_state;
+ session->state.watchdog_state = WATCHDOG_IDLE;
+ session->state.ping_count = 0;
+ session->state.quick_flush_state = 0;
+ session->state.quick_flush_target = 0;
+ session->state.idle_state = IDLE_STATE_ACTIVE;
+ session->state.request_switchin = false;
+ session->state.request_switchout = false;
+ session->state.job_frame_size = 0;
+
+ do_gettimeofday(&session->state.timestamp);
+
+ session->role = MVE_RSRC_MEM_ZALLOC(sizeof(char) * MAX_STRINGNAME_SIZE, GFP_KERNEL);
+ if (NULL == session->role)
+ {
+ goto error;
+ }
+
+ session->prot_version = MVE_FW_PROTOCOL_VERSION_UNKNOWN;
+ session->rpc_in_progress = false;
+
+ /* Alloc and map pages for the message and buffer queues */
+ session->msg_in_queue = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ session->msg_out_queue = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ session->buf_input_in = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ session->buf_input_out = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ session->buf_output_in = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ session->buf_output_out = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ session->rpc_area = mve_rsrc_dma_mem_alloc(MVE_MMU_PAGE_SIZE, DMA_MEM_TYPE_UNCACHED);
+ if (NULL == session->msg_in_queue ||
+ NULL == session->msg_out_queue ||
+ NULL == session->buf_input_in ||
+ NULL == session->buf_input_out ||
+ NULL == session->buf_output_in ||
+ NULL == session->buf_output_out ||
+ NULL == session->rpc_area)
+ {
+ goto error;
+ }
+
+ /* Map message in queue */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_MSG_IN_QUEUE, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->msg_in_queue);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+ /* Map message out queue */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_MSG_OUT_QUEUE, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->msg_out_queue);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+ /* Map input buffer in queue */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_INPUT_BUFFER_IN, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->buf_input_in);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+ /* Map input buffer out queue */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_INPUT_BUFFER_OUT, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->buf_input_out);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+ /* Map output buffer in queue */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_OUTPUT_BUFFER_IN, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->buf_output_in);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+ /* Map output buffer out queue */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_OUTPUT_BUFFER_OUT, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->buf_output_out);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+ /* Map RPC area */
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_RPC_QUEUE, &region);
+ pages = mve_rsrc_dma_mem_get_pages(session->rpc_area);
+ ret = mve_mmu_map_pages(session->mmu_ctx, pages,
+ region.start, 1, ATTRIB_SHARED_RW, ACCESS_READ_WRITE, false);
+ if (false == ret)
+ {
+ goto error;
+ }
+
+ session->mve_events = mve_queue_create(16384);
+ session->response_events = mve_queue_create(8192);
+ if (NULL == session->mve_events || NULL == session->response_events)
+ {
+ goto error;
+ }
+
+ session->output_buffer_count = 0;
+ session->input_buffer_count = 0;
+ session->pending_response_count = 0;
+ session->buffer_on_hold_count = 0;
+ session->pending_buffer_on_hold_count = 0;
+ session->state.eos_queued = false;
+
+ INIT_LIST_HEAD(&session->queued_input_buffers);
+ INIT_LIST_HEAD(&session->queued_output_buffers);
+
+ INIT_LIST_HEAD(&session->external_buffers);
+ INIT_LIST_HEAD(&session->quick_flush_buffers);
+ session->next_mve_handle = 1;
+
+ INIT_LIST_HEAD(&session->list);
+ sema_init(&session->semaphore, 1);
+
+ kref_init(&session->refcount);
+
+ /* Add session to the list of sessions */
+ sem_taken = down_interruptible(&sessions_sem);
+ list_add(&session->list, &sessions);
+
+ num_sessions++;
+
+ if (2 == num_sessions)
+ {
+        /* Creating this session changed the driver from single- to multi-session
+         * operation. If the other session is running an infinite job, switch it
+         * out and reschedule it using finite jobs. */
+ struct list_head *pos;
+
+ list_for_each(pos, &sessions)
+ {
+ struct mve_session *ptr = container_of(pos, struct mve_session, list);
+
+ if (session != ptr)
+ {
+ /* We have found the session to switch out. Add a reference
+ * to the session and release the sessions_sem */
+ kref_get(&ptr->refcount);
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ sem_taken = -1;
+ }
+
+ lock_session(ptr);
+ /* Infinite job. Set the switchin flag and switch it out! */
+ ptr->state.request_switchin = true;
+ switchout_session(ptr);
+ unlock_session(ptr);
+ release_session(ptr);
+ break;
+ }
+ }
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+
+ mver_pm_request_resume();
+
+#ifndef DISABLE_WATCHDOG
+ watchdog_create_session();
+#endif
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_INFO, session, "Session created.");
+
+ return session;
+
+error:
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_ERROR,
+ session,
+ "An error occurred during session creation.");
+
+ mve_queue_destroy(session->response_events);
+ mve_queue_destroy(session->mve_events);
+
+ if (NULL != session->msg_out_queue)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_MSG_OUT_QUEUE, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->msg_out_queue);
+ }
+ if (NULL != session->msg_in_queue)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_MSG_IN_QUEUE, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->msg_in_queue);
+ }
+ if (NULL != session->buf_input_in)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_INPUT_BUFFER_IN, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->buf_input_in);
+ }
+ if (NULL != session->buf_input_out)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_INPUT_BUFFER_OUT, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->buf_input_out);
+ }
+ if (NULL != session->buf_output_in)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_OUTPUT_BUFFER_IN, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->buf_output_in);
+ }
+ if (NULL != session->buf_output_out)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_OUTPUT_BUFFER_OUT, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->buf_output_out);
+ }
+ if (NULL != session->rpc_area)
+ {
+ mve_mem_virt_region_get(MVE_FW_PROTOCOL_VERSION_1_0, VIRT_MEM_REGION_RPC_QUEUE, &region);
+ mve_mmu_unmap_pages(session->mmu_ctx, region.start);
+ mve_rsrc_dma_mem_free(session->rpc_area);
+ }
+
+ if (NULL != session->role)
+ {
+ MVE_RSRC_MEM_FREE(session->role);
+ }
+ if (NULL != session->mmu_ctx)
+ {
+ mve_mmu_destroy_ctx(session->mmu_ctx);
+ }
+
+ MVE_RSRC_MEM_FREE(session);
+
+ return NULL;
+}
+
+void mve_session_destroy(struct mve_session *session)
+{
+ bool ret;
+ int sem_taken;
+
+ sem_taken = down_interruptible(&sessions_sem);
+ ret = is_session_valid(session);
+ if (false == ret)
+ {
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+ return;
+ }
+
+ /* Remove the session from the sessions list. This marks the session as
+ * invalid. */
+ list_del(&session->list);
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+
+ session_destroy_instance(session);
+}
+
+struct mve_session *mve_session_get_by_file(struct file *filep)
+{
+ struct list_head *pos, *next;
+ struct mve_session *session = NULL;
+ int sem_taken;
+
+ sem_taken = down_interruptible(&sessions_sem);
+
+ list_for_each_safe(pos, next, &sessions)
+ {
+ struct mve_session *ptr = container_of(pos, struct mve_session, list);
+
+ if (filep == ptr->filep)
+ {
+ session = ptr;
+ break;
+ }
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+
+ return session;
+}
+
+void mve_session_cleanup_client(struct file *filep)
+{
+ struct mve_session *session;
+
+ session = mve_session_get_by_file(filep);
+ if (NULL != session)
+ {
+ int sem_taken = down_interruptible(&sessions_sem);
+ list_del(&session->list);
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+
+ session_destroy_instance(session);
+ }
+}
+
+void mve_session_destroy_all(void)
+{
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, &sessions)
+ {
+ struct mve_session *ptr = container_of(pos, struct mve_session, list);
+
+ int sem_taken = down_interruptible(&sessions_sem);
+ list_del(&ptr->list);
+ if (0 == sem_taken)
+ {
+ up(&sessions_sem);
+ }
+
+ session_destroy_instance(ptr);
+ }
+}
+
+void mve_session_set_role(struct mve_session *session, char *role)
+{
+ bool ret;
+
+ if (NULL == role)
+ {
+ return;
+ }
+
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session,
+ MVE_LOG_INFO,
+ session,
+ "Setting session role to %s.",
+ role);
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return;
+ }
+
+ if (true == session->fw_loaded)
+ {
+ WARN_ON(true == session->fw_loaded);
+ }
+ else
+ {
+ void *ptr;
+
+ strncpy(session->role, role, MAX_STRINGNAME_SIZE);
+ /* Make sure the string is NULL terminated */
+ session->role[MAX_STRINGNAME_SIZE - 1] = '\0';
+
+ ptr = strstr(session->role, "decoder");
+ if (NULL != ptr)
+ {
+ session->session_type = MVE_SESSION_TYPE_DECODER;
+ }
+ else
+ {
+ session->session_type = MVE_SESSION_TYPE_ENCODER;
+ }
+ }
+
+ release_session(session);
+}
+
+bool mve_session_activate(struct mve_session *session, uint32_t *version,
+ struct mve_base_fw_secure_descriptor *fw_secure_desc)
+{
+ phys_addr_t l1_page;
+ bool ret;
+ int ncores;
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return false;
+ }
+
+ ncores = mver_scheduler_get_ncores();
+
+    /* Generate a secure firmware instance with pre-created l2 pages, or
+     * just load the regular firmware if secure l2 pages are not available. */
+ session->fw_inst = mve_fw_load(session->mmu_ctx, fw_secure_desc, session->role, ncores);
+ if (NULL != session->fw_inst)
+ {
+ mve_base_error error;
+ struct mve_base_fw_version *ver;
+
+ session->fw_loaded = true;
+ ver = mve_fw_get_version(session->fw_inst);
+ if (NULL != ver)
+ {
+ session->prot_version = ver->major - 1;
+ if (NULL != version)
+ {
+ *version = ver->major << 8 | ver->minor;
+ }
+ }
+ else
+ {
+ session->prot_version = MVE_FW_PROTOCOL_VERSION_UNKNOWN;
+ }
+ error = mve_com_set_interface_version(session, session->prot_version);
+ mve_fw_log_fw_binary(session->fw_inst, session);
+
+ if (error == MVE_BASE_ERROR_NONE)
+ {
+ l1_page = mve_mmu_get_id(session->mmu_ctx);
+ mver_scheduler_register_session((mver_session_id)session,
+ mve_mmu_make_l0_entry(ATTRIB_PRIVATE, l1_page),
+ ncores,
+ mve_fw_secure(session->fw_inst),
+ mve_session_irq_callback,
+ mve_session_has_work_callback,
+ mve_session_switchout_callback,
+ mve_session_switchin_callback,
+ mve_session_switchout_completed_callback,
+ mve_session_get_restricting_buffer_count_callback);
+
+ ret = true;
+ }
+ else
+ {
+ ret = false;
+ }
+ }
+ else
+ {
+ ret = false;
+ }
+
+ release_session(session);
+ return ret;
+}
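+
+/* Illustrative call sequence (sketch only; the real caller is the ioctl
+ * layer, which is not part of this file). The role must be set before
+ * activation, and a role containing "decoder" types the session as a
+ * decoder; "video_decoder.avc" is only an example role string:
+ *
+ *   struct mve_session *s = mve_session_create(filep);
+ *   mve_session_set_role(s, "video_decoder.avc");
+ *   if (false != mve_session_activate(s, &version, fw_secure_desc))
+ *   {
+ *       // enqueue buffers, state changes, parameters ...
+ *   }
+ *   mve_session_destroy(s);
+ */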
+
+mve_base_error mve_session_enqueue_flush_buffers(struct mve_session *session, uint32_t flush)
+{
+ bool res;
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+ /* Make sure all messages from the FW have been handled */
+ handle_mve_message(session);
+
+ if (WATCHDOG_TIMEOUT == session->state.watchdog_state)
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ goto out;
+ }
+
+ if (0 == (flush & MVE_BASE_FLUSH_ALL_PORTS))
+ {
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ goto out;
+ }
+
+ if (0 != (flush & MVE_BASE_FLUSH_INPUT_PORT))
+ {
+ ret = enqueue_message(session, MVE_MESSAGE_CODE_INPUT_FLUSH, 0, NULL);
+ mver_irq_signal_mve((mver_session_id)session);
+ return_all_pending_buffers_to_userspace(session, &session->queued_input_buffers, true);
+ }
+
+ if (0 != (flush & MVE_BASE_FLUSH_OUTPUT_PORT))
+ {
+ ret |= enqueue_message(session, MVE_MESSAGE_CODE_OUTPUT_FLUSH, 0, NULL);
+ mver_irq_signal_mve((mver_session_id)session);
+ return_all_pending_buffers_to_userspace(session, &session->queued_output_buffers, false);
+ }
+
+ if (MVE_BASE_ERROR_NONE == ret && 0 != (flush & MVE_BASE_FLUSH_QUICK))
+ {
+ session->state.quick_flush_state = 0;
+ session->state.quick_flush_target = flush;
+ }
+
+out:
+ unlock_session(session);
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ schedule_session(session);
+ }
+ release_session(session);
+
+ return ret;
+}
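+
+/* Illustrative usage (sketch; the MVE_BASE_FLUSH_* flags are assumed to
+ * combine as bit masks, as the tests above suggest): request a quick flush
+ * of both ports.
+ *
+ *   mve_session_enqueue_flush_buffers(session,
+ *                                     MVE_BASE_FLUSH_ALL_PORTS | MVE_BASE_FLUSH_QUICK);
+ */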
+
+mve_base_error mve_session_enqueue_state_change(struct mve_session *session, enum mve_base_hw_state state)
+{
+ bool res;
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ if (MVE_BASE_HW_STATE_STOPPED != state &&
+ MVE_BASE_HW_STATE_RUNNING != state)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+
+ /* Make sure all messages from the FW have been handled */
+ handle_mve_message(session);
+
+ if (WATCHDOG_TIMEOUT == session->state.watchdog_state)
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ goto out;
+ }
+
+ if (session->state.pending_hw_state == MVE_BASE_HW_STATE_PENDING)
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ goto out;
+ }
+
+ /* Remember requested state. The go/stop command can only be safely added to the message
+ * queue just before the session is switched in, in order to guarantee that the go/stop
+ * command is preceded by a job command. */
+ session->state.pending_hw_state = state;
+
+#if SCHEDULER_MODE_IDLE_SWITCHOUT != 1
+ /* The state change implementation always defers state changes to immediately
+ * after a switch in. This is normally not a problem since the FW will report
+ * idleness which causes the session to switch out and because of the pending
+ * state change, the session will be switched in again. The problem is when
+ * idle switchout has been disabled. In this case, the driver will not switch
+ * out idle sessions and the pending state change will never be handled. If
+ * idle switchout is disabled, the driver must force switchout of the session
+ * here. */
+ switchout_session(session);
+#endif
+
+ /* Unlock the session before any calls to the scheduler, to avoid deadlocks if the scheduler
+ * calls one of the session callbacks. */
+ unlock_session(session);
+ schedule_session(session);
+
+ release_session(session);
+
+ return ret;
+
+out:
+ unlock_session(session);
+ release_session(session);
+
+ return ret;
+}
+
+struct mve_base_event_header *mve_session_get_event(struct mve_session *session, uint32_t timeout)
+{
+ struct mve_base_event_header *event = NULL;
+ bool ret;
+
+ ret = acquire_session(session);
+ if (false == ret)
+ {
+ return NULL;
+ }
+
+ if (true == session->fw_loaded)
+ {
+ event = mve_queue_wait_for_event(session->mve_events, timeout);
+ }
+
+ release_session(session);
+
+ return event;
+}
+
+mve_base_error mve_session_set_paramconfig(struct mve_session *session,
+ uint32_t size,
+ void *data,
+ uint32_t *firmware_error,
+ enum MVE_ENTITY entity)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ bool res;
+ int msg;
+ int response_count;
+
+ if (NULL == data)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ if (WATCHDOG_TIMEOUT == session->state.watchdog_state)
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ release_session(session);
+ return ret;
+ }
+
+ res = schedule_session(session);
+ if (false == res)
+ {
+ ret = MVE_BASE_ERROR_NOT_READY;
+ release_session(session);
+ return ret;
+ }
+
+ lock_session(session);
+
+ if (STATE_ACTIVE_JOB != session->state.state &&
+ STATE_WAITING_FOR_RESPONSE != session->state.state &&
+ STATE_SWITCHING_IN != session->state.state)
+ {
+ ret = MVE_BASE_ERROR_NOT_READY;
+ goto out;
+ }
+
+ if (PARAMETER == entity)
+ {
+ msg = MVE_MESSAGE_CODE_SET_PARAMETER;
+ }
+ else
+ {
+ msg = MVE_MESSAGE_CODE_SET_CONFIG;
+ }
+
+ change_session_state(session, STATE_WAITING_FOR_RESPONSE);
+
+ /* Remember the current number of messages we are waiting for. */
+ response_count = session->pending_response_count;
+
+ /* Enqueue message to firmware and send interrupt. */
+ ret = enqueue_message(session, msg, size, data);
+ mver_irq_signal_mve((mver_session_id)session);
+
+ /* Calculate the number of confirms that should be received. */
+ response_count = session->pending_response_count - response_count;
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ struct set_response
+ {
+ uint32_t index;
+ uint32_t error;
+ };
+
+ struct mve_base_event_header *event = NULL;
+ struct set_response *response;
+
+ /* Wait for the response message. First unlock the session semaphore or
+ * the interrupt handler will not be able to process the incoming message.
+ * The timeout is set to 10 seconds because once a message is placed
+ * in the message queue, it's not possible to remove it. If we don't
+ * wait until the message has been received, the next get/set parameter/config
+ * call will receive it and regard that message as the response to the one
+ * it just put into the message queue. */
+ while (response_count-- > 0)
+ {
+ unlock_session(session);
+ event = mve_queue_wait_for_event(session->response_events, 10000);
+ lock_session(session);
+
+ if (NULL == event)
+ {
+ if (false != mve_queue_interrupted(session->response_events))
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ }
+ else
+ {
+ ret = MVE_BASE_ERROR_NOT_READY;
+ }
+ change_session_state(session, STATE_ACTIVE_JOB);
+ goto out;
+ }
+
+ if (MVE_BASE_EVENT_SET_PARAMCONFIG != event->code ||
+ sizeof(struct set_response) != event->size)
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ goto out;
+ }
+
+ /* Extract error code */
+ response = (struct set_response *)event->data;
+ if (response->error != 0)
+ {
+ *firmware_error = response->error;
+ ret = MVE_BASE_ERROR_FIRMWARE;
+ }
+
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ }
+
+ /* Flag response as received. Some set parameters in the v2 host interface
+     * don't result in any response. Revert to the old state in that case. */
+ if (STATE_WAITING_FOR_RESPONSE == session->state.state)
+ {
+ response_received(session);
+ }
+ }
+ else
+ {
+ if (response_count == 0)
+ {
+ *firmware_error = ret;
+ ret = MVE_BASE_ERROR_FIRMWARE;
+ }
+ change_session_state(session, STATE_ACTIVE_JOB);
+ }
+
+out:
+ unlock_session(session);
+ release_session(session);
+ return ret;
+}
+
+mve_base_error mve_session_get_paramconfig(struct mve_session *session,
+ struct mve_base_command_header *command,
+ void *data,
+ struct mve_response *response,
+ enum MVE_ENTITY entity)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ struct mve_base_event_header *event = NULL;
+ struct mve_get_reply *event_response;
+ bool res;
+ uint32_t msg;
+
+ struct mve_get_reply
+ {
+ uint32_t index;
+ uint32_t return_code;
+ uint32_t data[1];
+ };
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ if (WATCHDOG_TIMEOUT == session->state.watchdog_state)
+ {
+ ret = MVE_BASE_ERROR_HARDWARE;
+ goto out1;
+ }
+
+ res = schedule_session(session);
+ if (false == res)
+ {
+ ret = MVE_BASE_ERROR_NOT_READY;
+ goto out1;
+ }
+
+ lock_session(session);
+ if (STATE_ACTIVE_JOB != session->state.state &&
+ STATE_WAITING_FOR_RESPONSE != session->state.state &&
+ STATE_SWITCHING_IN != session->state.state)
+ {
+ ret = MVE_BASE_ERROR_NOT_READY;
+ goto out2;
+ }
+
+ if (PARAMETER == entity)
+ {
+ msg = MVE_MESSAGE_CODE_GET_PARAMETER;
+ }
+ else
+ {
+ msg = MVE_MESSAGE_CODE_GET_CONFIG;
+ }
+
+ change_session_state(session, STATE_WAITING_FOR_RESPONSE);
+ ret = enqueue_message(session, msg, command->size, data);
+ mver_irq_signal_mve((mver_session_id)session);
+
+ if (MVE_BASE_ERROR_NONE != ret)
+ {
+ change_session_state(session, STATE_ACTIVE_JOB);
+ goto out2;
+ }
+
+ /* Wait for the response message. First unlock the session semaphore or
+ * the interrupt handler will not be able to process the incoming message.
+ * The timeout is set to 10 seconds because once a message is placed
+ * in the message queue, it's not possible to remove it. If we don't
+ * wait until the message has been received, the next get/set parameter/config
+ * call will receive it and regard that message as the response to the one
+ * it just put into the message queue. */
+ unlock_session(session);
+ event = mve_queue_wait_for_event(session->response_events, 10000);
+ lock_session(session);
+
+ if (NULL == event)
+ {
+ /* According to the OMX spec, the client shall retry the call if
+ * MVE_BASE_ERROR_NOT_READY is returned. */
+ ret = MVE_BASE_ERROR_NOT_READY;
+ change_session_state(session, STATE_ACTIVE_JOB);
+ goto out2;
+ }
+
+    /* Flag response as received. Firmware v2 fakes return messages for some
+     * requests. Revert to the old state if needed. */
+ if (STATE_WAITING_FOR_RESPONSE == session->state.state)
+ {
+ response_received(session);
+ }
+
+ /* Request Response
+ * +-------------+ +-------------+
+ * | Code | | Code |
+ * +-------------+ +-------------+
+ * | Size | | Size' |
+ * +-------------+ ^ +-------------+ ^
+ * | OMX index | | | OMX index | |
+ * +-------------+ | Size +-------------+ |
+ * | OMX message | | | Return code | | Size' = Size + 4
+ * +-------------+ v +-------------+ |
+ * | OMX message | |
+ * +-------------+ V
+ */
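+
+    /* Worked example (illustrative): if command->size is 8 bytes (OMX index
+     * plus one 4-byte message word), a valid reply has event->size ==
+     * command->size + 4 == 12 bytes (index, return code, message word), and
+     * response->size below becomes 12 - 8 = 4 bytes. */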
+ event_response = (struct mve_get_reply *)event->data;
+
+ if (MVE_BASE_EVENT_GET_PARAMCONFIG != event->code || (command->size + sizeof(uint32_t)) != event->size)
+ {
+ /* The type of message or message size didn't match the expected */
+ ret = MVE_BASE_ERROR_HARDWARE;
+ goto out2;
+ }
+
+ response->firmware_error = event_response->return_code;
+ if (0 != event_response->return_code)
+ {
+ ret = MVE_BASE_ERROR_FIRMWARE;
+ goto out2;
+ }
+
+ /* Remove size of 'OMX index' and 'return code'. */
+ response->size = event->size - 2 * sizeof(uint32_t);
+
+ /* Firmware adds a 4 byte return code to the response message. */
+ response->data = MVE_RSRC_MEM_CACHE_ALLOC(response->size, GFP_KERNEL);
+ if (NULL == response->data)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto out2;
+ }
+
+ memcpy(response->data, &event_response->data, response->size);
+
+out2:
+ if (NULL != event)
+ {
+ MVE_RSRC_MEM_CACHE_FREE(event, EVENT_SIZE(event->size));
+ }
+
+ unlock_session(session);
+
+out1:
+ release_session(session);
+
+ return ret;
+}
+
+mve_base_error mve_session_buffer_register(struct mve_session *session,
+ uint32_t port_index,
+ struct mve_base_buffer_userspace *descriptor)
+{
+ mve_base_error ret;
+ bool res;
+
+ if (NULL == descriptor)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ if (MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC == descriptor->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B == descriptor->format)
+ {
+ uint32_t fuse = mver_reg_get_fuse();
+ if (fuse & (1 << CORESCHED_FUSE_DISABLE_AFBC))
+ {
+ return MVE_BASE_ERROR_NOT_IMPLEMENTED;
+ }
+ }
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+ ret = mve_session_buffer_register_internal(session, port_index, descriptor);
+ unlock_session(session);
+ release_session(session);
+
+ return ret;
+}
+
+mve_base_error mve_session_buffer_unregister(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id)
+{
+ mve_base_error ret;
+ bool res;
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+ ret = mve_session_buffer_unregister_internal(session, buffer_id);
+ unlock_session(session);
+ release_session(session);
+
+ return ret;
+}
+
+mve_base_error mve_session_buffer_enqueue(struct mve_session *session,
+ struct mve_base_buffer_details *param,
+ bool empty_this_buffer)
+{
+ mve_base_error ret;
+ bool res;
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+ /* Make sure all messages from the FW have been handled */
+ handle_mve_message(session);
+
+ if (false != empty_this_buffer)
+ {
+ session->state.eos_queued = ((param->flags & OMX_BUFFERFLAG_EOS) == OMX_BUFFERFLAG_EOS);
+ if (false != session->state.eos_queued)
+ {
+ MVE_LOG_PRINT_SESSION(&mve_rsrc_log_session, MVE_LOG_DEBUG, session, "Setting eos_queued.");
+ }
+ }
+ else
+ {
+ session->keep_freq_high = false;
+ }
+
+ ret = mve_session_buffer_enqueue_internal(session, param, empty_this_buffer);
+
+ /* Queue the request and return success. */
+ if (MVE_BASE_ERROR_TIMEOUT == ret)
+ {
+ struct session_enqueue_request *request;
+
+ /* Allocate struct and store arguments. */
+ request = MVE_RSRC_MEM_ZALLOC(sizeof(*request), GFP_KERNEL);
+ if (NULL == request)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto exit;
+ }
+
+ request->param = *param;
+
+ /* Add request last in queue. */
+ if (true == empty_this_buffer)
+ {
+ list_add_tail(&request->list, &session->queued_input_buffers);
+ }
+ else
+ {
+ list_add_tail(&request->list, &session->queued_output_buffers);
+ }
+
+ ret = MVE_BASE_ERROR_NONE;
+ }
+
+ if (MVE_BASE_ERROR_NONE == ret)
+ {
+ /* Reset the process to detect idleness and request that this session is
+ * switched in again if a switch out has already started. */
+ session->state.idle_state = IDLE_STATE_ACTIVE;
+ session->state.request_switchin = true;
+ }
+
+exit:
+ unlock_session(session);
+ res = schedule_session(session);
+ if (false != res)
+ {
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ release_session(session);
+
+ return ret;
+}
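+
+/* Behavior note (derived from the code above): if the FW queue is full
+ * (MVE_BASE_ERROR_TIMEOUT), the request is parked on queued_input_buffers or
+ * queued_output_buffers and success is reported to the caller; requests still
+ * pending when the session is destroyed are freed in session_destroy_instance. */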
+
+mve_base_error mve_session_buffer_notify_ref_frame_release(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id)
+{
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ bool res;
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+ mve_session_buffer_notify_ref_frame_release_internal(session, buffer_id);
+ unlock_session(session);
+ res = schedule_session(session);
+ if (false != res)
+ {
+ mver_irq_signal_mve((mver_session_id)session);
+ }
+ release_session(session);
+
+ return ret;
+}
+
+mve_base_error mve_session_enqueue_message(struct mve_session *session,
+ uint32_t code,
+ uint32_t size,
+ void *data)
+{
+ mve_base_error ret;
+ bool res;
+
+ res = acquire_session(session);
+ if (false == res)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ lock_session(session);
+ ret = enqueue_message(session, code, size, data);
+ unlock_session(session);
+ schedule_session(session);
+ release_session(session);
+
+ return ret;
+}
+
+#ifndef EMULATOR
+uint32_t mve_session_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+ struct mve_session *session;
+ uint32_t mask = 0;
+
+ session = mve_session_get_by_file(filp);
+ if (NULL != session)
+ {
+ lock_session(session);
+
+ poll_wait(filp, &session->mve_events->wait_queue, poll_table);
+
+ if (false != mve_queue_event_ready(session->mve_events))
+ {
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ unlock_session(session);
+ }
+
+ return mask;
+}
+#endif
+
+#ifdef UNIT
+#ifndef DISABLE_WATCHDOG
+void mve_session_firmware_hung_simulation(struct mve_session *session, uint32_t on)
+{
+ if (1 == on)
+ {
+ watchdog_ignore_pong = true;
+ }
+ else
+ {
+ watchdog_ignore_pong = false;
+ }
+}
+#endif
+#endif
diff --git a/drivers/video/arm/v5xx/base/mve_session.h b/drivers/video/arm/v5xx/base/mve_session.h
new file mode 100644
index 000000000000..8423d5543c61
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_session.h
@@ -0,0 +1,426 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_SESSION_H
+#define MVE_SESSION_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/time.h>
+#include <linux/semaphore.h>
+#include <linux/poll.h>
+#endif
+
+#include "mve_base.h"
+#include "mve_mmu.h"
+#include "mve_fw.h"
+#include "mve_queue.h"
+#include "mve_buffer.h"
+#include "mve_command.h"
+#include "mve_com.h"
+
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_mem_dma.h"
+
+/**
+ * Describes the session state.
+ */
+enum MVE_SESSION_STATE
+{
+ STATE_SWITCHED_OUT, /**< Session is switched out and doesn't have an allocated LSID */
+ STATE_SWITCHING_IN, /**< Session is in the process of being switched in */
+ STATE_SWITCHING_OUT, /**< Session is in the process of being switched out */
+ STATE_WAITING_FOR_RESPONSE, /**< Session is waiting for response from e.g. a get/set call */
+ STATE_ACTIVE_JOB, /**< Session is active, decoding/encoding a stream */
+};
+
+/**
+ * Describes the session watchdog state.
+ */
+enum MVE_WATCHDOG_STATE
+{
+ WATCHDOG_IDLE, /**< Watchdog idle */
+ WATCHDOG_PINGED, /**< Watchdog pinged the session and wait for response */
+    WATCHDOG_PONGED,  /**< MVE responded to the ping with a pong */
+ WATCHDOG_TIMEOUT, /**< Watchdog timeout for ping response */
+};
+
+enum MVE_IDLE_STATE
+{
+ IDLE_STATE_ACTIVE, /**< Session has not received any idle messages */
+ IDLE_STATE_PENDING, /**< Session has received one idle message but the FW can still have work to do */
+ IDLE_STATE_IDLE_PENDING, /**< Session has received second idle message but the FW can still have work to do */
+ IDLE_STATE_IDLE, /**< Session has received three idle messages in a row without the client sending
+ * any other messages in between. This session can now be switched out. */
+};
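+
+/* Idle detection progression (derived from the states above): the state
+ * advances ACTIVE -> PENDING -> IDLE_PENDING -> IDLE, one step per idle
+ * message from the FW, and falls back to ACTIVE whenever new work is
+ * enqueued. Only IDLE_STATE_IDLE allows the session to be switched out. */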
+
+/* Used in get/set parameter/config calls to hint whether the client tries
+ * to set/get a parameter or config. */
+enum MVE_ENTITY {PARAMETER, CONFIG};
+
+/**
+ * This structure describes the state of the session
+ */
+struct mve_state
+{
+ enum MVE_SESSION_STATE state; /**< The current session state */
+ uint32_t irqs; /**< Interrupt counter */
+
+ enum mve_base_hw_state hw_state; /**< Track the hw state */
+ enum mve_base_hw_state pending_hw_state; /**< Pending hardware state that will be requested
+ * next time the session switches in. */
+
+ enum MVE_WATCHDOG_STATE watchdog_state; /**< The current watchdog state */
+    uint32_t ping_count;                     /**< Watchdog ping counter. The hardware
+                                              *   is considered dead when the counter
+                                              *   reaches its maximum without a pong. */
+    struct timeval timestamp;                /**< Timestamp marking the most recent
+                                              *   response from the FW. This is used by the
+                                              *   watchdog to decide when the FW
+                                              *   should be pinged. */
+    enum MVE_IDLE_STATE idle_state;          /**< Describes the FW idle state */
+ bool request_switchin; /**< This member marks that the session should be
+ * switched in upon switchout */
+    bool request_switchout;                  /**< This member marks that the session should be
+ * switched out as soon as possible */
+ int job_frame_size; /**< Size of the last queued job */
+
+ enum mve_base_flush quick_flush_state; /**< The current quick flush state */
+ enum mve_base_flush quick_flush_target; /**< When the current quick flush state
+ * has reached this state then we're
+ * done */
+ bool eos_queued; /**< Indicates last incoming buffer has EOS flag */
+};
+
+enum mve_session_type
+{
+ MVE_SESSION_TYPE_DECODER = 0,
+ MVE_SESSION_TYPE_ENCODER
+};
+
+/**
+ * Structure representing a video session.
+ */
+struct mve_session
+{
+ struct file *filep; /**< struct file * of the file descriptor used
+ * to create this session. When a file descriptor
+ * is closed, this member is used to identify all
+ * sessions that must be destroyed to avoid memory
+ * leaks. */
+
+ struct mve_mmu_ctx *mmu_ctx; /**< MMU context */
+
+ struct mve_state state; /**< Session state */
+ bool fw_loaded; /**< Has a firmware been loaded for this session? */
+
+ char *role; /**< The OMX role */
+ struct mve_fw_instance *fw_inst; /**< The loaded firmware instance */
+
+ enum mve_session_type session_type; /**< Decoder or encoder */
+
+ uint32_t output_buffer_count; /**< Number of output buffers currently owned by the firmware. */
+ uint32_t input_buffer_count; /**< Number of input buffers currently owned by the firmware. */
+ uint32_t pending_response_count; /**< Number of pending responses from the firmware. */
+    uint32_t buffer_on_hold_count;          /**< Number of buffers that the firmware has finished processing,
+ * but is holding on to for frame reordering. */
+ uint32_t pending_buffer_on_hold_count; /**< Some FW versions signal the DPB in the form of a message
+ * that will be valid once the next frame has been returned
+ * by the FW. In these cases, the new DPB is stored in this
+ * variable and copied to buffer_on_hold_count once the next
+ * buffer arrives. */
+ struct list_head queued_input_buffers; /**< Input buffers that have not yet been added to firmware queue. */
+ struct list_head queued_output_buffers; /**< Output buffers that have not yet been added to firmware queue. */
+
+ struct list_head external_buffers; /**< External buffer mappings */
+ struct list_head quick_flush_buffers; /**< List of quick flush buffers */
+
+    uint16_t next_mve_handle;              /**< Each external buffer is assigned a session
+                                            *   unique 16-bit identifier. This ID is used to
+                                            *   match all buffers returned from the MVE with
+                                            *   the corresponding mve_buffer_client instance. The
+                                            *   next_mve_handle is incremented for each
+                                            *   registered buffer and the value is assigned
+                                            *   to the corresponding mve_buffer_client instance. */
+
+ struct mve_com *com; /**< Firmware communication. */
+ enum mve_fw_protocol_version prot_version; /**< FW host interface protocol version */
+ bool rpc_in_progress; /**< RPC irq being processed, useful for async rpc memory operations */
+
+ struct mve_rsrc_dma_mem_t *msg_in_queue; /**< CPU -> MVE messages */
+ struct mve_rsrc_dma_mem_t *msg_out_queue; /**< CPU <- MVE messages */
+ struct mve_rsrc_dma_mem_t *buf_input_in; /**< Input buffers to MVE */
+ struct mve_rsrc_dma_mem_t *buf_input_out; /**< Input buffers returned by MVE */
+ struct mve_rsrc_dma_mem_t *buf_output_in; /**< Output buffers to MVE */
+ struct mve_rsrc_dma_mem_t *buf_output_out; /**< Output buffers returned by MVE */
+ struct mve_rsrc_dma_mem_t *rpc_area; /**< Physical address of the RPC data */
+
+ struct mve_queue *mve_events; /**< Queue for regular MVE events */
+ struct mve_queue *response_events; /**< Queue for query responses */
+
+ struct semaphore semaphore; /**< Semaphore offering mutual exclusion */
+ struct kref refcount; /**< Session reference counter */
+ struct list_head list; /**< Sessions linked list */
+ bool keep_freq_high; /**< Control DVFS by avoiding low frequency at start of usecase */
+
+ struct mve_com_trace_buffers fw_trace; /**< Book keeping data for FW trace buffers */
+};
+
+/**
+ * Performs all initialization required before any sessions can be created.
+ * This includes creating the watchdog thread and the master session semaphore,
+ * and initializing the debug functionality.
+ * @param dev Pointer to the device structure received from the kernel.
+ */
+void mve_session_init(struct device *dev);
+
+/**
+ * Frees resources allocated during mve_session_init. This function must
+ * be called when the driver is unloaded.
+ * @param dev Pointer to the device structure received from the kernel.
+ */
+void mve_session_deinit(struct device *dev);
+
+/**
+ * Frees all created sessions. This function is called internally by
+ * mve_session_deinit.
+ */
+void mve_session_destroy_all_sessions(void);
+
+/**
+ * Create a new session.
+ * @param filep The struct file * identifying the client process that owns
+ * this session. It also serves as the ID of the created session.
+ * @return mve_session struct used to represent this session.
+ */
+struct mve_session *mve_session_create(struct file *filep);
+
+/**
+ * Destroy session. All resources referenced by this session are freed.
+ * @param session Session to destroy.
+ */
+void mve_session_destroy(struct mve_session *session);
+
+/**
+ * This function is called to make sure that all sessions created by a process
+ * are destroyed when the process quits. The supplied parameter is used to
+ * identify the process that has just quit.
+ * @param filep struct file * of the file descriptor used to create sessions.
+ */
+void mve_session_cleanup_client(struct file *filep);
+
+/**
+ * Destroy all sessions currently allocated.
+ */
+void mve_session_destroy_all(void);
+
+/**
+ * Returns the session associated with the supplied struct file *.
+ * @param filep The struct file * associated with the file descriptor used to
+ * create the session that will be returned.
+ * @return A session, or NULL if no session has been created using the supplied struct file *.
+ */
+struct mve_session *mve_session_get_by_file(struct file *filep);
+
+/**
+ * Set the OMX role of the session. The role is used to decide which firmware
+ * to load. The role must be set before the session can be activated. Note
+ * that the role is not validated until the session is activated.
+ * @param session The MVE session.
+ * @param role The OMX role.
+ */
+void mve_session_set_role(struct mve_session *session, char *role);
+
+/**
+ * Activate session. This function loads the firmware and sets up the
+ * MMU tables. Note that the role of the session must be set before
+ * the session can be activated.
+ * @param session Session to activate.
+ * @param version Firmware protocol version returned.
+ * @param fw_secure_desc Contains fw version and l2 page tables address for secure sessions.
+ * @return True on success, false on failure.
+ */
+bool mve_session_activate(struct mve_session *session, uint32_t *version,
+ struct mve_base_fw_secure_descriptor *fw_secure_desc);
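+
+/*
+ * Illustrative sketch (not part of this patch): the expected call order when
+ * bringing up a session. Error handling is omitted and the role string is a
+ * hypothetical example.
+ *
+ *   uint32_t version;
+ *   struct mve_base_fw_secure_descriptor secure_desc = { 0 };
+ *   struct mve_session *session;
+ *
+ *   session = mve_session_create(filep);
+ *   mve_session_set_role(session, "video_decoder.avc");
+ *   if (false == mve_session_activate(session, &version, &secure_desc))
+ *   {
+ *       mve_session_destroy(session);
+ *   }
+ */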
+
+/**
+ * Enqueue commands to stop a running session and flush all input and output
+ * buffers.
+ *
+ * @param session Session to stop.
+ * @param flush Specifies what kind of flush to do. See enum mve_flush.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_enqueue_flush_buffers(struct mve_session *session,
+ uint32_t flush);
+
+/**
+ * Enqueues a state change command to the session.
+ * @param session The session to enqueue the state change command.
+ * @param state The state to change to.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_enqueue_state_change(struct mve_session *session,
+ enum mve_base_hw_state state);
+
+/**
+ * Get a queued MVE event. If no event is pending, the call blocks until
+ * either an event becomes available or the timeout expires.
+ *
+ * If the timeout expires without an event becoming available, the function
+ * returns NULL.
+ *
+ * If a non-NULL value is returned then it is the responsibility of the
+ * caller to free the memory associated with the returned pointer.
+ * @param session The session.
+ * @param timeout Timeout, in milliseconds.
+ * @return Pointer to a mve_base_event_header structure containing the event
+ * data, or NULL if the timeout expired without an event. */
+struct mve_base_event_header *mve_session_get_event(struct mve_session *session, uint32_t timeout);
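+
+/*
+ * Illustrative sketch (not part of this patch): waiting up to 100 ms for an
+ * event. process_event() and free_event() are hypothetical helpers; the
+ * caller owns the returned pointer and must free it.
+ *
+ *   struct mve_base_event_header *event;
+ *
+ *   event = mve_session_get_event(session, 100);
+ *   if (NULL != event)
+ *   {
+ *       process_event(event);
+ *       free_event(event);
+ *   }
+ */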
+
+/**
+ * Set an OMX parameter or config. The attached data must contain the
+ * parameter/config index and the OMX parameter/config structure.
+ * @param session The session.
+ * @param size Size of the attached data.
+ * @param data The parameter/config data to set.
+ * @param firmware_error Firmware error code.
+ * @param entity Specifies whether this is a config or parameter operation.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_set_paramconfig(struct mve_session *session,
+ uint32_t size,
+ void *data,
+ uint32_t *firmware_error,
+ enum MVE_ENTITY entity);
+
+/**
+ * Get an OMX parameter or config.
+ * @param session The session.
+ * @param command Command header describing the parameter/config to get.
+ * @param data Pointer to the data attached to the command.
+ * @param response Pointer that will receive the response data. The client
+ * is responsible for freeing this buffer.
+ * @param entity Specifies whether this is a config or parameter operation.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_get_paramconfig(struct mve_session *session,
+ struct mve_base_command_header *command,
+ void *data,
+ struct mve_response *response,
+ enum MVE_ENTITY entity);
+
+/**
+ * Register a user allocated buffer with the session. This must be done before
+ * the buffer can be used by the MVE.
+ * @param session The session.
+ * @param port_index OMX port index (0 = input, 1 = output).
+ * @param descriptor Buffer descriptor.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_register(struct mve_session *session,
+ uint32_t port_index,
+ struct mve_base_buffer_userspace *descriptor);
+
+/**
+ * Unregister a user allocated buffer with the session. This must be done
+ * to free system resources.
+ * @param session The session.
+ * @param buffer_id ID of the buffer to unregister.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_unregister(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id);
+
+/**
+ * Enqueue an input or output buffer.
+ * @param session The session.
+ * @param param Description of the input/output buffer.
+ * @param empty_this_buffer Signals whether this buffer shall be emptied or filled.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_enqueue(struct mve_session *session,
+ struct mve_base_buffer_details *param,
+ bool empty_this_buffer);
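+
+/*
+ * Illustrative sketch (not part of this patch): a buffer must be registered
+ * on a port before it can be enqueued, and unregistered once it is no longer
+ * needed. The descriptor contents are hypothetical and must be populated by
+ * the caller.
+ *
+ *   struct mve_base_buffer_userspace desc = { 0 };
+ *   struct mve_base_buffer_details param = { 0 };
+ *
+ *   mve_session_buffer_register(session, 0, &desc);
+ *   mve_session_buffer_enqueue(session, &param, true);
+ *   ...
+ *   mve_session_buffer_unregister(session, desc.buffer_id);
+ */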
+
+/**
+ * Send a request to the firmware that the userspace driver wants to be notified
+ * when the supplied buffer is no longer used as a reference frame.
+ * @param session The session.
+ * @param buffer_id ID of the buffer.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_notify_ref_frame_release(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id);
+
+/**
+ * Enqueue a command to the command queue. This function must not be
+ * exposed to userspace.
+ * @param session The session.
+ * @param code Command code.
+ * @param size Size of the attached data.
+ * @param data Pointer to the first byte of the data attached to the command.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_enqueue_message(struct mve_session *session,
+ uint32_t code,
+ uint32_t size,
+ void *data);
+
+/**
+ * This function is used to support fd poll and select.
+ * @param filp The struct file * representing the fd.
+ * @param poll_table Structure used by the Linux kernel to implement poll and select.
+ * @return Mask describing the operations that can be performed immediately without blocking.
+ */
+uint32_t mve_session_poll(struct file *filp, struct poll_table_struct *poll_table);
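+
+/*
+ * Illustrative sketch (not part of this patch): from userspace the session fd
+ * can be multiplexed with poll(2), assuming the driver reports pending events
+ * as readable.
+ *
+ *   struct pollfd pfd = { .fd = session_fd, .events = POLLIN };
+ *
+ *   if (0 < poll(&pfd, 1, 100) && (pfd.revents & POLLIN))
+ *   {
+ *       (read an event, e.g. via the driver's event interface)
+ *   }
+ */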
+
+#ifndef EMULATOR
+/**
+ * Handler for RPC Memory allocation. It can be called from mve_command in case
+ * of secure memory allocations.
+ * @param session The session.
+ * @param fd DMA buf fd of the allocated memory in case it is secure memory, -1 otherwise.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_handle_rpc_mem_alloc(struct mve_session *session, int fd);
+
+/**
+ * Handler for RPC Memory resize. It can be called from mve_command in case
+ * of secure memory allocations.
+ * @param session The session.
+ * @param fd DMA buf fd for the additional memory in case it is secure memory, -1 otherwise.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_handle_rpc_mem_resize(struct mve_session *session, int fd);
+
+#endif /* EMULATOR */
+
+#ifdef UNIT
+/**
+ * Simulate a firmware hang by ignoring the pong response. For unit tests only.
+ * @param session The session.
+ * @param on 1 - enable hang simulation; 0 - disable.
+ */
+void mve_session_firmware_hung_simulation(struct mve_session *session, uint32_t on);
+
+void *mve_session_get_static_fptr(char *func_name);
+
+#endif /* UNIT */
+
+#endif /* MVE_SESSION_H */
diff --git a/drivers/video/arm/v5xx/base/mve_session_buffer.c b/drivers/video/arm/v5xx/base/mve_session_buffer.c
new file mode 100644
index 000000000000..98eece39ca0e
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_session_buffer.c
@@ -0,0 +1,992 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/list.h>
+#include <linux/slab.h>
+#endif
+
+#include "mve_session.h"
+#include "mve_buffer.h"
+#include "mve_mmu.h"
+#include "mve_com.h"
+
+#include "mve_rsrc_irq.h"
+
+#include <host_interface_v1/mve_protocol_kernel.h>
+
+#define AFBC_MB_WIDTH 16
+/* Map buffers on 16 MB boundaries */
+#define EXTERNAL_OUTPUT_BUFFER_ALIGNMENT (16 * 1024 * 1024)
+
+/**
+ * Maps a buffer into MVE address space.
+ * @param session The owner of the buffer.
+ * @param buffer The buffer to map.
+ * @param alignment Alignment, in bytes, of the buffer mapping in MVE address space.
+ * @return True on success, false on failure.
+ */
+static bool map_external_buffer_mve(struct mve_session *session,
+ struct mve_buffer_external *buffer,
+ uint32_t alignment)
+{
+ return mve_mmu_map_pages_and_reserve(session->mmu_ctx,
+ buffer->mapping.pages,
+ &buffer->mapping.region,
+ buffer->mapping.num_pages,
+ buffer->mapping.num_pages,
+ alignment,
+ ATTRIB_SHARED_RW,
+ ACCESS_READ_WRITE,
+ false);
+}
+
+/**
+ * Remove the buffer pages from the MVE MMU table.
+ * @param session The owner of the buffer.
+ * @param buffer User allocated buffer.
+ */
+static void unmap_external_buffer_mve(struct mve_session *session, struct mve_buffer_external *buffer)
+{
+ mve_mmu_unmap_pages(session->mmu_ctx, buffer->mapping.region.start);
+ mver_scheduler_flush_tlb((mver_session_id)session);
+}
+
+/**
+ * Returns the buffer descriptor (if one exists) for a user allocated buffer
+ * mapping matching the supplied buffer ID.
+ * @param session The session.
+ * @param buffer_id ID of the user allocated buffer.
+ * @return The buffer descriptor if the supplied user allocated buffer has been
+ * mapped. NULL if no such descriptor exists.
+ */
+static struct mve_buffer_client *get_external_descriptor(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, register_list);
+
+ if (buffer_id == ptr->buffer->info.buffer_id)
+ {
+ return ptr;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Destroy a struct mve_buffer_external. This function makes sure that the buffer
+ * is unmapped and that all memory is freed.
+ * @param session The owner of the buffer.
+ * @param buffer The buffer to destroy.
+ */
+static void destroy_buffer_external(struct mve_session *session,
+ struct mve_buffer_external *buffer)
+{
+ if (NULL != buffer)
+ {
+ if (MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT != buffer->info.allocator)
+ {
+ unmap_external_buffer_mve(session, buffer);
+ }
+
+ mve_buffer_unmap_physical_pages(buffer);
+ mve_buffer_destroy_buffer_external(buffer);
+ }
+}
+
+void mve_session_buffer_unmap_all(struct mve_session *session)
+{
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, &session->external_buffers)
+ {
+ struct mve_buffer_client *ptr = container_of(pos, struct mve_buffer_client, register_list);
+
+ destroy_buffer_external(session, ptr->buffer);
+ destroy_buffer_external(session, ptr->crc);
+
+ /* Remove mapping from the bookkeeping structure */
+ list_del(&ptr->register_list);
+
+ MVE_RSRC_MEM_CACHE_FREE(ptr, sizeof(struct mve_buffer_client));
+ }
+}
+
+/**
+ * Creates a struct mve_buffer_external instance and sets up its members.
+ * @param session The owner of the buffer.
+ * @param port_index The port this buffer will be mapped to.
+ * @param region_type Specifies where in the MVE address space this buffer will be mapped.
+ * @param info Buffer information.
+ * @return Returns a struct mve_buffer_external on success, NULL on failure.
+ */
+static struct mve_buffer_external *setup_buffer_external(struct mve_session *session,
+ uint32_t port_index,
+ enum mve_mem_virt_region_type region_type,
+ struct mve_buffer_info *info)
+{
+ struct mve_buffer_external *buffer;
+ bool res;
+
+ buffer = mve_buffer_create_buffer_external(info, port_index);
+ if (NULL == buffer)
+ {
+ return NULL;
+ }
+
+ mve_mem_virt_region_get(session->prot_version, region_type, &buffer->mapping.region);
+
+ res = mve_buffer_map_physical_pages(buffer);
+ if (false == res)
+ {
+ mve_buffer_destroy_buffer_external(buffer);
+ return NULL;
+ }
+ else
+ {
+ if (false != info->do_cache_maintenance)
+ {
+ /* Set the CPU as the owner. When the buffer is enqueued,
+ * owner will be changed to the hardware. */
+ mve_buffer_set_owner(buffer, MVE_BUFFER_OWNER_CPU, port_index);
+ }
+
+ if (MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT != info->allocator)
+ {
+ uint32_t alignment = 0;
+
+ if (VIRT_MEM_REGION_OUT_BUF == region_type)
+ {
+ alignment = EXTERNAL_OUTPUT_BUFFER_ALIGNMENT;
+ }
+
+ res = map_external_buffer_mve(session, buffer, alignment);
+ if (false == res)
+ {
+ mve_buffer_unmap_physical_pages(buffer);
+ mve_buffer_destroy_buffer_external(buffer);
+ return NULL;
+ }
+ }
+ }
+
+ return buffer;
+}
+
+mve_base_error mve_session_buffer_register_internal(struct mve_session *session,
+ uint32_t port_index,
+ struct mve_base_buffer_userspace *userspace_buffer)
+{
+ enum mve_mem_virt_region_type region_mapping[][2] =
+ {
+ /* Input port Output port */
+ { VIRT_MEM_REGION_PROTECTED, VIRT_MEM_REGION_OUT_BUF }, /* Decoder */
+ { VIRT_MEM_REGION_OUT_BUF, VIRT_MEM_REGION_PROTECTED } /* Encoder */
+ };
+
+ struct mve_buffer_client *buffer;
+ enum mve_mem_virt_region_type region_type;
+ bool res;
+ struct mve_buffer_info buffer_info, crc_info;
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ buffer = get_external_descriptor(session, userspace_buffer->buffer_id);
+ if (NULL != buffer)
+ {
+ /* Buffer has already been registered */
+ return MVE_BASE_ERROR_NONE;
+ }
+
+ buffer_info.buffer_id = userspace_buffer->buffer_id;
+ buffer_info.handle = userspace_buffer->handle;
+ buffer_info.allocator = userspace_buffer->allocator;
+ buffer_info.size = userspace_buffer->size;
+ buffer_info.width = userspace_buffer->width;
+ buffer_info.height = userspace_buffer->height;
+ buffer_info.stride = userspace_buffer->stride;
+ buffer_info.stride_alignment = userspace_buffer->stride_alignment;
+ buffer_info.format = userspace_buffer->format;
+ buffer_info.offset = 0;
+ buffer_info.do_cache_maintenance = true;
+
+ buffer_info.afbc_width_in_superblocks = userspace_buffer->afbc_width_in_superblocks;
+ buffer_info.afbc_alloc_bytes = userspace_buffer->afbc_alloc_bytes;
+
+ crc_info.handle = userspace_buffer->crc_handle;
+ crc_info.allocator = userspace_buffer->crc_allocator;
+ crc_info.size = userspace_buffer->crc_size;
+ crc_info.format = MVE_BASE_OMX_BUFFER_FORMAT_CRC;
+ crc_info.offset = userspace_buffer->crc_offset;
+ crc_info.do_cache_maintenance = true;
+
+ if ((userspace_buffer->mve_flags & MVE_BASE_FLAGS_DMABUF_DISABLE_CACHE_MAINTENANCE) != 0 &&
+ MVE_BASE_BUFFER_ALLOCATOR_DMABUF == userspace_buffer->allocator)
+ {
+ buffer_info.do_cache_maintenance = false;
+ crc_info.do_cache_maintenance = false;
+ }
+
+ /* Sanity-check the buffer information */
+ res = mve_buffer_is_valid(&buffer_info) &&
+ (0 != crc_info.handle ? mve_buffer_is_valid(&crc_info) : true);
+ if (false == res)
+ {
+#ifdef _DEBUG
+ WARN_ON(true);
+#endif
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ /* Check that the buffer attachment is actually within the containing buffer */
+ if (0 != crc_info.handle && MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT == crc_info.allocator &&
+ buffer_info.size < crc_info.offset + crc_info.size)
+ {
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ buffer = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(struct mve_buffer_client), GFP_KERNEL);
+ if (NULL == buffer)
+ {
+ return MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Decide in which MVE virtual region to map the external buffer */
+ region_type = region_mapping[session->session_type][port_index];
+
+ buffer->in_use = 0;
+ buffer->port_index = port_index;
+
+ buffer->buffer = setup_buffer_external(session, port_index, region_type, &buffer_info);
+ if (NULL == buffer->buffer)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto error;
+ }
+
+ if (0 != crc_info.handle)
+ {
+ buffer->crc = setup_buffer_external(session, port_index, region_type, &crc_info);
+ if (NULL == buffer->crc)
+ {
+ ret = MVE_BASE_ERROR_INSUFFICIENT_RESOURCES;
+ goto error;
+ }
+ }
+
+ /* Save the buffer for later */
+ INIT_LIST_HEAD(&buffer->register_list);
+ INIT_LIST_HEAD(&buffer->quick_flush_list);
+ list_add(&buffer->register_list, &session->external_buffers);
+
+ buffer->mve_handle = MVE_HANDLE_INVALID;
+
+ return MVE_BASE_ERROR_NONE;
+
+error:
+ if (NULL != buffer)
+ {
+ destroy_buffer_external(session, buffer->buffer);
+ MVE_RSRC_MEM_CACHE_FREE(buffer, sizeof(struct mve_buffer_client));
+ }
+
+ return ret;
+}
+
+mve_base_error mve_session_buffer_unregister_internal(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id)
+{
+ struct mve_buffer_client *buffer;
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+
+ buffer = get_external_descriptor(session, buffer_id);
+ if (NULL == buffer)
+ {
+ ret = MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+ else
+ {
+ if (0 != buffer->in_use)
+ {
+ /* The buffer is still in use! Do not unmap it */
+ WARN_ON(true);
+ ret = MVE_BASE_ERROR_NOT_READY;
+ }
+ else
+ {
+ /* Remove mapping from the bookkeeping structure */
+ list_del(&buffer->register_list);
+
+ destroy_buffer_external(session, buffer->buffer);
+ destroy_buffer_external(session, buffer->crc);
+
+ MVE_RSRC_MEM_CACHE_FREE(buffer, sizeof(struct mve_buffer_client));
+ }
+ }
+
+ return ret;
+}
+
+mve_base_error mve_session_buffer_enqueue_internal(struct mve_session *session,
+ struct mve_base_buffer_details *param,
+ bool empty_this_buffer)
+{
+ struct mve_buffer_client *buffer_client;
+ struct mve_buffer_info *buffer_info;
+ struct mve_buffer_external *external_buffer;
+ enum mve_com_buffer_type buffer_type = MVE_COM_BUFFER_TYPE_FRAME;
+
+ mve_base_error ret = MVE_BASE_ERROR_NONE;
+ mve_com_buffer buf;
+ bool res;
+ bool interlace = (param->mve_flags & MVE_FLAGS_INTERLACE) ? true : false;
+ int stride;
+ int stride_align;
+ int stride_shift = 0;
+ int slice_height;
+ uint32_t offset_in_page;
+
+ res = (false != empty_this_buffer) ? list_empty(&session->queued_input_buffers) : list_empty(&session->queued_output_buffers);
+ if (false == res)
+ {
+ /* There are already buffers queued waiting to be added to the firmware queue. */
+ return MVE_BASE_ERROR_TIMEOUT;
+ }
+
+ buffer_client = get_external_descriptor(session, param->buffer_id);
+ if (NULL == buffer_client)
+ {
+ /* The supplied user-space allocated buffer has not been registered
+ * or is invalid. */
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ external_buffer = buffer_client->buffer;
+ buffer_info = &external_buffer->info;
+
+ stride = buffer_info->stride;
+ stride_align = buffer_info->stride_alignment;
+ slice_height = buffer_info->height;
+
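+ /* For interlaced content the two fields share the buffer with their lines
+ * interleaved: doubling the strides (stride_shift) makes each field skip
+ * the other field's lines, and each field covers half the slice height. */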
+ if (interlace)
+ {
+ stride_shift = 1;
+ slice_height >>= 1;
+ }
+
+ offset_in_page = external_buffer->mapping.offset_in_page;
+
+ /* Update the descriptor */
+ buffer_client->filled_len = param->filled_len;
+ buffer_client->flags = param->flags;
+ buffer_client->mve_flags = param->mve_flags;
+ buffer_client->timestamp = param->timestamp;
+
+ if (NULL != buffer_client->crc)
+ {
+ buffer_client->crc->info.offset = param->crc_offset;
+ }
+
+ /* Assign the buffer a new MVE handle. The handle is only valid until the
+ * buffer is dequeued. */
+ if (MVE_HANDLE_INVALID == buffer_client->mve_handle)
+ {
+ buffer_client->mve_handle = session->next_mve_handle++;
+ }
+
+ switch (buffer_info->format)
+ {
+ case MVE_BASE_OMX_BUFFER_FORMAT_BITSTREAM:
+ buf.bitstream.nHandle = buffer_client->mve_handle;
+ buf.bitstream.nAllocLen = buffer_info->size;
+ buf.bitstream.pBufferData = external_buffer->mapping.region.start + offset_in_page;
+ buf.bitstream.nFilledLen = param->filled_len;
+ buf.bitstream.nOffset = 0;
+ buf.bitstream.nFlags = param->flags;
+ buf.bitstream.timestamp = param->timestamp;
+ buffer_type = MVE_COM_BUFFER_TYPE_BITSTREAM;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YV12:
+ buf.frame.nHandle = buffer_client->mve_handle;
+ buf.frame.format = buffer_info->format;
+ buf.frame.nFlags = param->flags;
+ buf.frame.nMVEFlags = param->mve_flags;
+ buf.frame.timestamp = param->timestamp;
+ buf.frame.decoded_height = buffer_info->height;
+ buf.frame.decoded_width = buffer_info->width;
+
+ /* The chroma and luma strides should eventually be supplied by gralloc */
+ buf.frame.data.planar.stride[0] = ROUND_UP(stride, stride_align) << stride_shift;
+ buf.frame.data.planar.stride[1] = ROUND_UP((stride + 1) >> 1, stride_align) << stride_shift;
+ buf.frame.data.planar.stride[2] = ROUND_UP((stride + 1) >> 1, stride_align) << stride_shift;
+ if (buf.frame.data.planar.stride[0] * buffer_info->height >=
+ ROUND_UP(buffer_info->height, stride_align) * buffer_info->width &&
+ buf.frame.data.planar.stride[1] * ((buffer_info->height + 1) >> 1) >=
+ ROUND_UP((buffer_info->height + 1) >> 1, stride_align) * ((buffer_info->width + 1) >> 1))
+ {
+ buf.frame.data.planar.stride_90[0] = ROUND_UP(buffer_info->height, stride_align) << stride_shift;
+ buf.frame.data.planar.stride_90[1] = ROUND_UP((buffer_info->height + 1) >> 1, stride_align) << stride_shift;
+ buf.frame.data.planar.stride_90[2] = ROUND_UP((buffer_info->height + 1) >> 1, stride_align) << stride_shift;
+ }
+ else
+ {
+ buf.frame.data.planar.stride_90[0] = buffer_info->height << stride_shift;
+ buf.frame.data.planar.stride_90[1] = ((buffer_info->height + 1) >> 1) << stride_shift;
+ buf.frame.data.planar.stride_90[2] = ((buffer_info->height + 1) >> 1) << stride_shift;
+ }
+ buf.frame.data.planar.plane_top[0] = external_buffer->mapping.region.start + offset_in_page;
+
+ if (MVE_BASE_OMX_BUFFER_FORMAT_YV12 == buffer_info->format)
+ {
+ buf.frame.data.planar.plane_top[2] = buf.frame.data.planar.plane_top[0] +
+ buf.frame.data.planar.stride[0] * slice_height;
+ buf.frame.data.planar.plane_top[1] = buf.frame.data.planar.plane_top[2] +
+ buf.frame.data.planar.stride[2] * (slice_height >> 1);
+ }
+ else /* if (MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR == buffer_info->format) */
+ {
+ buf.frame.data.planar.plane_top[1] = buf.frame.data.planar.plane_top[0] +
+ buf.frame.data.planar.stride[0] * slice_height;
+ buf.frame.data.planar.plane_top[2] = buf.frame.data.planar.plane_top[1] +
+ buf.frame.data.planar.stride[1] * (slice_height >> 1);
+ }
+
+ if (interlace)
+ {
+ buf.frame.data.planar.plane_bot[0] =
+ buf.frame.data.planar.plane_top[0] + (buf.frame.data.planar.stride[0] >> 1);
+ buf.frame.data.planar.plane_bot[1] =
+ buf.frame.data.planar.plane_top[1] + (buf.frame.data.planar.stride[1] >> 1);
+ buf.frame.data.planar.plane_bot[2] =
+ buf.frame.data.planar.plane_top[2] + (buf.frame.data.planar.stride[2] >> 1);
+ }
+ else
+ {
+ buf.frame.data.planar.plane_bot[0] = 0;
+ buf.frame.data.planar.plane_bot[1] = 0;
+ buf.frame.data.planar.plane_bot[2] = 0;
+ }
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR:
+ buf.frame.nHandle = buffer_client->mve_handle;
+ buf.frame.format = buffer_info->format;
+ buf.frame.nFlags = param->flags;
+ buf.frame.nMVEFlags = param->mve_flags;
+ buf.frame.timestamp = param->timestamp;
+ buf.frame.decoded_height = buffer_info->height;
+ buf.frame.decoded_width = buffer_info->width;
+
+ buf.frame.data.planar.stride[0] = ROUND_UP(stride, stride_align) << stride_shift;
+ buf.frame.data.planar.stride[1] = ROUND_UP(stride, stride_align) << stride_shift;
+ buf.frame.data.planar.stride[2] = 0;
+ if (buf.frame.data.planar.stride[0] * buffer_info->height >=
+ ROUND_UP(buffer_info->height, stride_align) * buffer_info->width)
+ {
+ buf.frame.data.planar.stride_90[0] = ROUND_UP(buffer_info->height, stride_align) << stride_shift;
+ buf.frame.data.planar.stride_90[1] = ROUND_UP(buffer_info->height, stride_align) << stride_shift;
+ }
+ else
+ {
+ buf.frame.data.planar.stride_90[0] = buffer_info->height << stride_shift;
+ buf.frame.data.planar.stride_90[1] = buffer_info->height << stride_shift;
+ }
+ buf.frame.data.planar.stride_90[2] = 0;
+ buf.frame.data.planar.plane_top[0] = external_buffer->mapping.region.start + offset_in_page;
+ buf.frame.data.planar.plane_top[1] = buf.frame.data.planar.plane_top[0] +
+ buf.frame.data.planar.stride[0] * slice_height;
+ buf.frame.data.planar.plane_top[2] = 0;
+ if (interlace)
+ {
+ buf.frame.data.planar.plane_bot[0] =
+ buf.frame.data.planar.plane_top[0] + (buf.frame.data.planar.stride[0] >> 1);
+ buf.frame.data.planar.plane_bot[1] =
+ buf.frame.data.planar.plane_top[1] + (buf.frame.data.planar.stride[1] >> 1);
+ buf.frame.data.planar.plane_bot[2] = 0;
+ }
+ else
+ {
+ buf.frame.data.planar.plane_bot[0] = 0;
+ buf.frame.data.planar.plane_bot[1] = 0;
+ buf.frame.data.planar.plane_bot[2] = 0;
+ }
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUYYVY_10B:
+ buf.frame.nHandle = buffer_client->mve_handle;
+ buf.frame.format = buffer_info->format;
+ buf.frame.nFlags = param->flags;
+ buf.frame.nMVEFlags = param->mve_flags;
+ buf.frame.timestamp = param->timestamp;
+ buf.frame.decoded_height = buffer_info->height;
+ buf.frame.decoded_width = buffer_info->width;
+
+ buf.frame.data.planar.stride[0] = ROUND_UP(stride * 4, stride_align) << stride_shift;
+ buf.frame.data.planar.stride[1] = 0;
+ buf.frame.data.planar.stride[2] = 0;
+ if (buf.frame.data.planar.stride[0] * buffer_info->height >=
+ ROUND_UP(buffer_info->height * 4, stride_align) * buffer_info->width)
+ {
+ buf.frame.data.planar.stride_90[0] = ROUND_UP(buffer_info->height * 4, stride_align);
+ }
+ else
+ {
+ buf.frame.data.planar.stride_90[0] = buffer_info->height * 4;
+ }
+ buf.frame.data.planar.stride_90[1] = 0;
+ buf.frame.data.planar.stride_90[2] = 0;
+ buf.frame.data.planar.plane_top[0] = external_buffer->mapping.region.start + offset_in_page;
+ buf.frame.data.planar.plane_top[1] = 0;
+ buf.frame.data.planar.plane_top[2] = 0;
+
+ if (interlace)
+ {
+ buf.frame.data.planar.plane_bot[0] = buf.frame.data.planar.plane_top[0] +
+ buffer_info->size / 2;
+ buf.frame.data.planar.plane_bot[1] = 0;
+ buf.frame.data.planar.plane_bot[2] = 0;
+ }
+ else
+ {
+ buf.frame.data.planar.plane_bot[0] = 0;
+ buf.frame.data.planar.plane_bot[1] = 0;
+ buf.frame.data.planar.plane_bot[2] = 0;
+ }
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC: /* Intentional fallthrough */
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B:
+ buf.frame.nHandle = buffer_client->mve_handle;
+ buf.frame.format = buffer_info->format;
+ buf.frame.nFlags = param->flags;
+ buf.frame.nMVEFlags = param->mve_flags;
+ buf.frame.timestamp = param->timestamp;
+ buf.frame.decoded_height = buffer_info->height;
+ buf.frame.decoded_width = buffer_info->width;
+ buf.frame.data.afbc.plane_top = external_buffer->mapping.region.start + offset_in_page;
+
+ if (interlace)
+ {
+ buf.frame.data.afbc.plane_bot = buf.frame.data.afbc.plane_top +
+ (((buffer_info->size >> 1) + 31) & 0xffffffe0);
+ buf.frame.data.afbc.alloc_bytes_top = buf.frame.data.afbc.plane_bot - buf.frame.data.afbc.plane_top;
+ buf.frame.data.afbc.alloc_bytes_bot = buffer_info->size - buf.frame.data.afbc.alloc_bytes_top;
+ }
+ else
+ {
+ buf.frame.data.afbc.plane_bot = 0;
+ buf.frame.data.afbc.alloc_bytes_top = buffer_info->size;
+ buf.frame.data.afbc.alloc_bytes_bot = 0;
+ }
+ buf.frame.data.afbc.cropx = 0;
+ buf.frame.data.afbc.cropy = 0;
+ buf.frame.data.afbc.y_offset = 0;
+ buf.frame.data.afbc.afbc_alloc_bytes = buffer_info->afbc_alloc_bytes;
+ buf.frame.data.afbc.afbc_width_in_superblocks[0] = buffer_info->afbc_width_in_superblocks;
+ buf.frame.data.afbc.afbc_width_in_superblocks[1] = (false == interlace) ? 0 : buffer_info->afbc_width_in_superblocks;
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV422_1P:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU422_1P:
+ buf.frame.nHandle = buffer_client->mve_handle;
+ buf.frame.format = buffer_info->format;
+ buf.frame.nFlags = param->flags;
+ buf.frame.nMVEFlags = param->mve_flags;
+ buf.frame.timestamp = param->timestamp;
+ buf.frame.decoded_height = buffer_info->height;
+ buf.frame.decoded_width = buffer_info->width;
+
+ buf.frame.data.planar.stride[0] = (stride * 2) << stride_shift;
+ buf.frame.data.planar.stride[1] = 0;
+ buf.frame.data.planar.stride[2] = 0;
+ buf.frame.data.planar.stride_90[0] = 0;
+ buf.frame.data.planar.stride_90[1] = 0;
+ buf.frame.data.planar.stride_90[2] = 0;
+
+ buf.frame.data.planar.plane_top[0] = external_buffer->mapping.region.start + offset_in_page;
+ buf.frame.data.planar.plane_top[1] = 0;
+ buf.frame.data.planar.plane_top[2] = 0;
+ if (interlace)
+ {
+ buf.frame.data.planar.plane_bot[0] =
+ buf.frame.data.planar.plane_top[0] + (buf.frame.data.planar.stride[0] >> 1);
+ buf.frame.data.planar.plane_bot[1] =
+ buf.frame.data.planar.plane_top[1] + (buf.frame.data.planar.stride[1] >> 1);
+ buf.frame.data.planar.plane_bot[2] =
+ buf.frame.data.planar.plane_top[2] + (buf.frame.data.planar.stride[2] >> 1);
+ }
+ else
+ {
+ buf.frame.data.planar.plane_bot[0] = 0;
+ buf.frame.data.planar.plane_bot[1] = 0;
+ buf.frame.data.planar.plane_bot[2] = 0;
+ }
+ break;
+ case MVE_BASE_OMX_BUFFER_FORMAT_RGBA_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_BGRA_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_ARGB_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_ABGR_8888:
+ buf.frame.nHandle = buffer_client->mve_handle;
+ buf.frame.format = buffer_info->format;
+ buf.frame.nFlags = param->flags;
+ buf.frame.nMVEFlags = param->mve_flags;
+ buf.frame.timestamp = param->timestamp;
+ buf.frame.decoded_height = buffer_info->height;
+ buf.frame.decoded_width = buffer_info->width;
+
+ buf.frame.data.planar.stride[0] = ROUND_UP(stride, stride_align) << stride_shift;
+ buf.frame.data.planar.stride[1] = 0;
+ buf.frame.data.planar.stride[2] = 0;
+ if (buf.frame.data.planar.stride[0] * buffer_info->height >=
+ ROUND_UP(buffer_info->height, stride_align) * buffer_info->width)
+ {
+ buf.frame.data.planar.stride_90[0] = ROUND_UP(buffer_info->height, stride_align);
+ }
+ else
+ {
+ buf.frame.data.planar.stride_90[0] = buffer_info->height;
+ }
+ buf.frame.data.planar.stride_90[1] = 0;
+ buf.frame.data.planar.stride_90[2] = 0;
+ buf.frame.data.planar.plane_top[0] = external_buffer->mapping.region.start + offset_in_page;
+ buf.frame.data.planar.plane_top[1] = 0;
+ buf.frame.data.planar.plane_top[2] = 0;
+ buf.frame.data.planar.plane_bot[0] = 0;
+ buf.frame.data.planar.plane_bot[1] = 0;
+ buf.frame.data.planar.plane_bot[2] = 0;
+ break;
+ default:
+ /* Unsupported format */
+ WARN_ON(true);
+ buffer_client->mve_handle = MVE_HANDLE_INVALID;
+ /* Give back the handle to the session */
+ session->next_mve_handle--;
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ /* Setup ROI if this feature is enabled */
+ if (0 != (param->mve_flags & MVE_FLAGS_ROI_PRESENT) &&
+ 0 != param->nRegions)
+ {
+ /* Send ROI rectangles to the FW */
+ mve_com_buffer roi_buf;
+ roi_buf.roi.nRegions = param->nRegions;
+ memcpy(roi_buf.roi.regions, param->regions, param->nRegions * sizeof(param->regions[0]));
+
+ ret = mve_com_add_input_buffer(session, &roi_buf, MVE_COM_BUFFER_TYPE_ROI);
+ WARN_ON(ret != MVE_BASE_ERROR_NONE);
+ }
+
+ /* Setup CRC buffer and correct MVE flags */
+ switch (buffer_info->format)
+ {
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR: /* Intentional fallthrough */
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUYYVY_10B:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YUV422_1P:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YVU422_1P:
+ case MVE_BASE_OMX_BUFFER_FORMAT_YV12:
+ case MVE_BASE_OMX_BUFFER_FORMAT_RGBA_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_BGRA_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_ARGB_8888:
+ case MVE_BASE_OMX_BUFFER_FORMAT_ABGR_8888:
+ /* Adjust MVE flags to indicate if the buffer is empty or not. */
+ if (0 == param->filled_len)
+ {
+ buf.frame.nMVEFlags &= ~(MVE_FLAGS_TOP_PRESENT | MVE_FLAGS_BOT_PRESENT);
+ }
+ else
+ {
+ buf.frame.nMVEFlags |= MVE_FLAGS_TOP_PRESENT;
+ }
+
+ if (NULL != buffer_client->crc)
+ {
+ if (0 != (param->mve_flags & MVE_FLAGS_CRC_PRESENT))
+ {
+ if (MVE_BASE_BUFFER_ALLOCATOR_ATTACHMENT == buffer_client->crc->info.allocator)
+ {
+ buf.frame.crc_top = external_buffer->mapping.region.start + offset_in_page + param->crc_offset;
+ }
+ else
+ {
+ buf.frame.crc_top = buffer_client->crc->mapping.region.start +
+ buffer_client->crc->mapping.offset_in_page;
+ }
+ buf.frame.crc_bot = 0;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Transfer ownership of the buffer from the CPU to the hardware. This
+ * call handles cache management */
+ if (false != buffer_info->do_cache_maintenance)
+ {
+ mve_buffer_set_owner(buffer_client->buffer, MVE_BUFFER_OWNER_DEVICE, buffer_client->port_index);
+ if (NULL != buffer_client->crc)
+ {
+ mve_buffer_set_owner(buffer_client->crc, MVE_BUFFER_OWNER_DEVICE, buffer_client->port_index);
+ }
+ }
+
+ if (false == empty_this_buffer)
+ {
+ ret = mve_com_add_output_buffer(session, &buf, buffer_type);
+ }
+ else
+ {
+ ret = mve_com_add_input_buffer(session, &buf, buffer_type);
+ }
+
+ if (MVE_BASE_ERROR_NONE != ret)
+ {
+ /* Failed to enqueue the buffer. */
+ buffer_client->mve_handle = MVE_HANDLE_INVALID;
+ /* Give back the handle to the session */
+ session->next_mve_handle--;
+ WARN_ON(0 < buffer_client->in_use);
+ }
+ else
+ {
+ /* Success! The buffer is now owned by the MVE */
+ buffer_client->in_use += 1;
+ }
+
+ return ret;
+}
+
+static uint32_t mve_session_buffer_get_afbc_size(uint32_t mbw,
+ uint32_t mbh,
+ uint32_t mb_size,
+ uint32_t payload_alignment,
+ uint32_t interlaced)
+{
+ unsigned size;
+
+ if (interlaced)
+ {
+ mbh = (mbh + 1) >> 1;
+ }
+ size = mbw * mbh * (16 + mb_size);
+ size = (size + payload_alignment - 1) & ~(payload_alignment - 1);
+
+ if (interlaced)
+ {
+ size <<= 1;
+ }
+ return size;
+}
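+
+/*
+ * Worked example (illustrative only): for a progressive 1920x1080 8-bit
+ * YUV420 AFBC frame, mbw = (1920 + 15) / 16 = 120 and
+ * mbh = (1080 + 15) / 16 = 68 superblocks, and each superblock carries a
+ * 16 byte header plus 16 * 16 * 3 / 2 = 384 payload bytes. The size before
+ * alignment is 120 * 68 * (16 + 384) = 3264000 bytes, which already happens
+ * to be a multiple of the 128-byte payload alignment used by the caller.
+ */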
+
+mve_base_error mve_session_buffer_dequeue_internal(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id,
+ mve_com_buffer *buffer)
+{
+ struct mve_buffer_client *buffer_client;
+ struct mve_buffer_info *buffer_info;
+
+ buffer_client = get_external_descriptor(session, buffer_id);
+ if (NULL == buffer_client)
+ {
+ /* The supplied user-space allocated buffer has not been registered
+ * or is invalid. */
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ buffer_info = &buffer_client->buffer->info;
+ /* Mark the buffer as no longer in use by the MVE */
+ buffer_client->in_use -= 1;
+ WARN_ON(0 > buffer_client->in_use);
+
+ if (MVE_SESSION_TYPE_DECODER == session->session_type &&
+ MVE_PORT_INDEX_OUTPUT == buffer_client->port_index)
+ {
+ if (MVE_FW_PROTOCOL_VERSION_1_0 == session->prot_version)
+ {
+ /* Buffers on hold count is only reported like this on V550 */
+ session->buffer_on_hold_count = (buffer->frame.nMVEFlags >> 24);
+ }
+ else
+ {
+ session->buffer_on_hold_count = session->pending_buffer_on_hold_count;
+ }
+ }
+
+ if (MVE_BASE_OMX_BUFFER_FORMAT_BITSTREAM == buffer_info->format)
+ {
+ buffer_client->filled_len = buffer->bitstream.nFilledLen;
+ buffer_client->offset = buffer->bitstream.nOffset;
+ buffer_client->flags = buffer->bitstream.nFlags;
+ buffer_client->timestamp = buffer->bitstream.timestamp;
+ }
+ else if (MVE_BASE_OMX_BUFFER_FORMAT_YUV420_PLANAR == buffer_info->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_SEMIPLANAR == buffer_info->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YV12 == buffer_info->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YVU420_SEMIPLANAR == buffer_info->format)
+ {
+ if (buffer->frame.nMVEFlags & (MVE_FLAGS_TOP_PRESENT | MVE_FLAGS_BOT_PRESENT))
+ {
+ buffer_client->filled_len = buffer->frame.decoded_width * buffer->frame.decoded_height * 3 / 2;
+ }
+ buffer_client->flags = buffer->frame.nFlags;
+ buffer_client->mve_flags = buffer->frame.nMVEFlags;
+ buffer_client->pic_index = buffer->frame.pic_index;
+ buffer_client->timestamp = buffer->frame.timestamp;
+ buffer_client->decoded_width = buffer->frame.decoded_width;
+ buffer_client->decoded_height = buffer->frame.decoded_height;
+ }
+ else if (MVE_BASE_OMX_BUFFER_FORMAT_YUYYVY_10B == buffer_info->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV422_1P == buffer_info->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YVU422_1P == buffer_info->format)
+ {
+ if (buffer->frame.nMVEFlags & (MVE_FLAGS_TOP_PRESENT | MVE_FLAGS_BOT_PRESENT))
+ {
+ buffer_client->filled_len = buffer->frame.decoded_width * buffer->frame.decoded_height * 2;
+ }
+ buffer_client->flags = buffer->frame.nFlags;
+ buffer_client->mve_flags = buffer->frame.nMVEFlags;
+ buffer_client->pic_index = buffer->frame.pic_index;
+ buffer_client->timestamp = buffer->frame.timestamp;
+ buffer_client->decoded_width = buffer->frame.decoded_width;
+ buffer_client->decoded_height = buffer->frame.decoded_height;
+ }
+ else if (MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC == buffer_info->format ||
+ MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC_10B == buffer_info->format)
+ {
+ if (buffer->frame.nMVEFlags & (MVE_FLAGS_TOP_PRESENT | MVE_FLAGS_BOT_PRESENT))
+ {
+ if (0 == buffer_info->afbc_alloc_bytes)
+ {
+ uint32_t w = (buffer->frame.decoded_width + AFBC_MB_WIDTH - 1) / AFBC_MB_WIDTH;
+ uint32_t h = (buffer->frame.decoded_height + AFBC_MB_WIDTH - 1) / AFBC_MB_WIDTH;
+ uint32_t mbs;
+
+ /* Magic formula for worst-case content */
+ if (MVE_BASE_OMX_BUFFER_FORMAT_YUV420_AFBC == buffer_info->format)
+ {
+ mbs = AFBC_MB_WIDTH * AFBC_MB_WIDTH * 3 / 2;
+ }
+ else
+ {
+ mbs = AFBC_MB_WIDTH * AFBC_MB_WIDTH * 15 / 8;
+ }
+
+ buffer_client->filled_len = mve_session_buffer_get_afbc_size(w, h, mbs, 128, 0);
+ }
+ else
+ {
+ buffer_client->filled_len = buffer_info->afbc_alloc_bytes;
+ }
+ }
+ /* If the bottom field is present and valid then the filled_len needs
+ * to be extended.
+ */
+ if (buffer->frame.nMVEFlags & MVE_FLAGS_BOT_PRESENT)
+ {
+ unsigned long b = (unsigned long)buffer->frame.data.afbc.plane_bot;
+ unsigned long t = (unsigned long)buffer->frame.data.afbc.plane_top;
+ buffer_client->filled_len += (b - t);
+ }
+ buffer_client->flags = buffer->frame.nFlags;
+ buffer_client->mve_flags = buffer->frame.nMVEFlags;
+ buffer_client->cropx = buffer->frame.data.afbc.cropx;
+ buffer_client->cropy = buffer->frame.data.afbc.cropy;
+ buffer_client->y_offset = buffer->frame.data.afbc.y_offset;
+ buffer_client->pic_index = buffer->frame.pic_index;
+ buffer_client->timestamp = buffer->frame.timestamp;
+ buffer_client->decoded_width = buffer->frame.decoded_width;
+ buffer_client->decoded_height = buffer->frame.decoded_height;
+ }
+
+ if (false != buffer_info->do_cache_maintenance)
+ {
+ /* Transfer ownership of the buffer from the hardware to the CPU. This
+ * call handles cache management */
+ mve_buffer_set_owner(buffer_client->buffer, MVE_BUFFER_OWNER_CPU, buffer_client->port_index);
+ if (NULL != buffer_client->crc)
+ {
+ mve_buffer_set_owner(buffer_client->crc, MVE_BUFFER_OWNER_CPU, buffer_client->port_index);
+ }
+ }
+
+ return MVE_BASE_ERROR_NONE;
+}
+
+mve_base_error mve_session_buffer_notify_ref_frame_release_internal(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id)
+{
+ struct mve_buffer_client *buffer_client;
+ struct mve_com_notify_release_ref_frame data;
+
+ buffer_client = get_external_descriptor(session, buffer_id);
+ if (NULL == buffer_client)
+ {
+ /* The supplied buffer handle has not been registered
+ * or is invalid. */
+ return MVE_BASE_ERROR_BAD_PARAMETER;
+ }
+
+ data.mve_buffer_addr = buffer_client->buffer->mapping.region.start +
+ buffer_client->buffer->mapping.offset_in_page;
+
+ return mve_com_add_message(session, MVE_MESSAGE_CODE_RELEASE_REF_FRAME, sizeof(data), (uint32_t *)&data);
+}
+
+void mve_session_buffer_convert_to_userspace(struct mve_base_buffer_userspace *dst,
+ struct mve_buffer_client *src)
+{
+ memset(dst, 0, sizeof(struct mve_base_buffer_userspace));
+
+ dst->handle = src->buffer->info.handle;
+ dst->buffer_id = src->buffer->info.buffer_id;
+ dst->allocator = src->buffer->info.allocator;
+ dst->size = src->buffer->info.size;
+ dst->width = src->buffer->info.width;
+ dst->height = src->buffer->info.height;
+ dst->stride = src->buffer->info.stride;
+ dst->stride_alignment = src->buffer->info.stride_alignment;
+ dst->format = src->buffer->info.format;
+
+ dst->filled_len = src->filled_len;
+ dst->offset = src->offset;
+ dst->flags = src->flags;
+ dst->mve_flags = src->mve_flags;
+ dst->cropx = src->cropx;
+ dst->cropy = src->cropy;
+ dst->y_offset = src->y_offset;
+ dst->pic_index = src->pic_index;
+ dst->timestamp = src->timestamp;
+ dst->decoded_width = src->decoded_width;
+ dst->decoded_height = src->decoded_height;
+
+ dst->afbc_width_in_superblocks = src->buffer->info.afbc_width_in_superblocks;
+ dst->afbc_alloc_bytes = src->buffer->info.afbc_alloc_bytes;
+
+ if (NULL != src->crc)
+ {
+ dst->crc_handle = src->crc->info.handle;
+ dst->crc_allocator = src->crc->info.allocator;
+ dst->crc_size = src->crc->info.size;
+ dst->crc_offset = src->crc->info.offset;
+ }
+}
diff --git a/drivers/video/arm/v5xx/base/mve_session_buffer.h b/drivers/video/arm/v5xx/base/mve_session_buffer.h
new file mode 100644
index 000000000000..95eadbaedfc4
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/mve_session_buffer.h
@@ -0,0 +1,95 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_SESSION_BUFFER_H
+#define MVE_SESSION_BUFFER_H
+
+#include "mve_base.h"
+#include "mve_com.h"
+
+/**
+ * Unregister all user allocated buffers with the driver.
+ * @param session The session.
+ */
+void mve_session_buffer_unmap_all(struct mve_session *session);
+
+/**
+ * Internal function called by mve_session_buffer_register. It registers
+ * a user allocated buffer with the session. This must be done before
+ * the buffer can be used by the MVE.
+ * @param session The session.
+ * @param port_index OMX port index (0 = input, 1 = output).
+ * @param userspace_buffer Buffer descriptor.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_register_internal(struct mve_session *session,
+ uint32_t port_index,
+ struct mve_base_buffer_userspace *userspace_buffer);
+
+/**
+ * Internal function called by mve_session_buffer_unregister. It unregisters
+ * a user allocated buffer with the session. This must be done to free
+ * the resources that were allocated when the buffer was registered.
+ * @param session The session.
+ * @param buffer_id ID of the buffer.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_unregister_internal(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id);
+
+/**
+ * Internal function called by mve_session_buffer_enqueue. It enqueues an input
+ * or output buffer.
+ * @param session The session.
+ * @param param Description of the input/output buffer.
+ * @param empty_this_buffer Signals whether this buffer shall be emptied or filled.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_enqueue_internal(struct mve_session *session,
+ struct mve_base_buffer_details *param,
+ bool empty_this_buffer);
+
+/**
+ * Internal function called by the interrupt handler when a buffer has been
+ * returned by the hardware. This function updates the internal representation
+ * of the buffer with the data returned by the hardware.
+ * @param session The session.
+ * @param buffer_id ID of the returned buffer.
+ * @param buffer The buffer data returned by the hardware.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_dequeue_internal(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id,
+ mve_com_buffer *buffer);
+
+/**
+ * Internal function called by mve_session_buffer_notify_ref_frame_release. It sends
+ * a request to the firmware that the userspace driver wants to be notified
+ * when the supplied buffer is no longer used as a reference frame.
+ * @param session The session.
+ * @param buffer_id ID of the buffer.
+ * @return MVE error code.
+ */
+mve_base_error mve_session_buffer_notify_ref_frame_release_internal(struct mve_session *session,
+ mve_base_buffer_handle_t buffer_id);
+
+/**
+ * Fill in the data for a MVE_EVENT_INPUT / MVE_EVENT_OUTPUT event.
+ * @param dst Destination of the data (event data container).
+ * @param src The buffer client where the data can be found.
+ */
+void mve_session_buffer_convert_to_userspace(struct mve_base_buffer_userspace *dst,
+ struct mve_buffer_client *src);
+
+#endif /* MVE_SESSION_BUFFER_H */
diff --git a/drivers/video/arm/v5xx/base/sconscript b/drivers/video/arm/v5xx/base/sconscript
new file mode 100644
index 000000000000..bed7e8a897f3
--- /dev/null
+++ b/drivers/video/arm/v5xx/base/sconscript
@@ -0,0 +1,46 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+import os
+
+Import('driver_env')
+myEnv = driver_env.Clone()
+
+if(myEnv['watchdog'] == '0'):
+ myEnv.Append(CPPDEFINES=['DISABLE_WATCHDOG=1'])
+
+if((myEnv['arch'] == 'x86_32') or (myEnv['arch'] == 'x86_64')):
+ myEnv.Append(CPPPATH=[myEnv['libs_install'],
+ '#/kernel/drivers/video/arm/v5xx/base',
+ '#/kernel/drivers/video/arm/v5xx/resource',
+ '#/kernel/drivers/video/arm/v5xx/external',
+ '#/fw/include',
+ '#/util/src'])
+
+ if(myEnv['unit'] == '0'):
+ myEnv.Append(CPPDEFINES=['SCHEDULER_MODE_IDLE_SWITCHOUT=1'])
+ else:
+ myEnv.Append(CPPDEFINES=['SCHEDULER_MODE_IDLE_SWITCHOUT=0'])
+
+ sources = ['mve_command.c', 'mve_session.c', 'mve_session_buffer.c', 'mve_mmu.c', 'mve_mem_region.c', 'mve_fw.c',
+ 'mve_queue.c', 'mve_buffer_common.c', 'mve_com.c', 'mve_com_host_interface_v1.c', 'mve_com_host_interface_v2.c',
+ 'mve_buffer_valloc.c', 'mve_buffer_attachment.c',
+ os.path.join(myEnv['BUILD_DIR_PATH'], 'emul/emulator_wrapper.o'),
+ os.path.join(myEnv['BUILD_DIR_PATH'], 'emul/emulator_userspace.o'),
+ os.path.join(myEnv['BUILD_DIR_PATH'], 'emul/emulator_mem.o'),
+ os.path.join(myEnv['BUILD_DIR_PATH'], 'emul/emulator_utils.o')]
+
+ sMVEDriverLib = myEnv.StaticLibrary(os.path.join("$STATIC_LIB_PATH", "mve_base"), sources)
+ myEnv.LibTarget("mve_base", sMVEDriverLib)
diff --git a/drivers/video/arm/v5xx/external/host_interface_v1/mve_coresched_reg.h b/drivers/video/arm/v5xx/external/host_interface_v1/mve_coresched_reg.h
new file mode 100644
index 000000000000..e811f7ea53b7
--- /dev/null
+++ b/drivers/video/arm/v5xx/external/host_interface_v1/mve_coresched_reg.h
@@ -0,0 +1,170 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __FW_INCLUDE__MVE_CORESCHED_REG_H__
+#define __FW_INCLUDE__MVE_CORESCHED_REG_H__
+
+/* */
+#define SOC_CORE_INTERVAL 0x200000u
+
+#define SOC_GET_COREID(base, addr) ((((addr)-(base)) >> 21) & 0xf)
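+
+/*
+ * Example (illustrative only): SOC_CORE_INTERVAL is 0x200000 (2 MB, i.e.
+ * 1 << 21), so each core's register bank occupies a 2 MB window and, for
+ * base = 0x0 and addr = 0x00400000, SOC_GET_COREID yields
+ * ((0x00400000 >> 21) & 0xf) = 2.
+ */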
+
+#define SOC_CORESCHED_OFFSET 0x000000u
+
+#define SESSIONRAM_SIZE_PER_LSID (4096)
+
+typedef struct _CS
+{
+ REG32 VERSION; /* (0x0000) Version register */
+ REG32 ENABLE; /* (0x0004) 1:enable scheduling. Poll for 0 after writing 0. */
+ REG32 NCORES; /* (0x0008) Number of cores (read-only) */
+ REG32 NLSID; /* (0x000c) Number of lsid (read-only) */
+ REG32 CORELSID; /* (0x0010) 4-bit entry per core specifying the current lsid or 15 if none (read-only) */
+ REG32 JOBQUEUE; /* (0x0014) Four 8-bit entries specifying jobs to be scheduled. */
+ /* Entries should be packed at the lower end of JOBQUEUE and consist of
+ a pair (LSID, NCORES-1) specifying which session and how many cores to schedule
+ the job to. Unused entries should hold the value JOBQUEUE_JOB_INVALID. */
+ #define CORESCHED_JOBQUEUE_JOB0 (0)
+ #define CORESCHED_JOBQUEUE_JOB1 (8)
+ #define CORESCHED_JOBQUEUE_JOB2 (16)
+ #define CORESCHED_JOBQUEUE_JOB3 (24)
+ #define CORESCHED_JOBQUEUE_JOB0_SZ (8)
+ #define CORESCHED_JOBQUEUE_JOB1_SZ (8)
+ #define CORESCHED_JOBQUEUE_JOB2_SZ (8)
+ #define CORESCHED_JOBQUEUE_JOB3_SZ (8)
+ #define JOBQUEUE_JOB_LSID (0) /* Which LSID to run */
+ #define JOBQUEUE_JOB_NCORES (4) /* No. of cores-1 needed to run this job */
+ #define JOBQUEUE_JOB_LSID_SZ (4)
+ #define JOBQUEUE_JOB_NCORES_SZ (4)
+ #define JOBQUEUE_JOB_INVALID (0x0f) /* code indicating an unused job queue entry */
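+ /* Example (illustrative only): an entry that schedules LSID 3 on two
+ cores is composed as (3 << JOBQUEUE_JOB_LSID) | ((2 - 1) << JOBQUEUE_JOB_NCORES)
+ = 0x13, placed in the JOB0 byte with the remaining entries holding
+ JOBQUEUE_JOB_INVALID. */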
+ REG32 IRQVE; /* (0x0018) one bit per LSID, irq from VE to host (read-only bitvector version of LSID[].IRQVE) */
+ REG32 CLKPAUSE; /* (0x001c) one bit per core - set to pause its clock */
+ REG32 CLKIDLE; /* (0x0020) one bit per core - '1' indicates that pause has taken effect (read-only) */
+ REG32 CLKFORCE; /* (0x0024) one bit per core - '1' forces clock to be enabled */
+ #define CORESCHED_CLKFORCE_CORE (0) /* set to force clock to run for core */
+ #define CORESCHED_CLKFORCE_CS (8) /* set to force clock to run for core scheduler (makes TIMER run) */
+ #define CORESCHED_CLKFORCE_POWER (9) /* set to force core power to be enabled */
+ #define CORESCHED_CLKFORCE_CORE_SZ (8) /* actually only NCORES bits */
+ #define CORESCHED_CLKFORCE_CS_SZ (1) /* single-bit field */
+ #define CORESCHED_CLKFORCE_POWER_SZ (1) /* single-bit field */
+ REG32 TIMER; /* (0x0028) 32 high bits of 36 bit timer (use CLKFORCE_CS to make it run) */
+ REG32 COREDEBUG; /* (0x002c) Base address mapped over SESSIONRAM */
+ REG32 SVNREV; /* (0x0030) SVN revision no (read-only) */
+ REG32 FUSE; /* (0x0034) Fuse bits (read-only) */
+ #define CORESCHED_FUSE_DISABLE_AFBC (0)
+ #define CORESCHED_FUSE_DISABLE_REAL (1)
+ #define CORESCHED_FUSE_DISABLE_VP8 (2)
+ #define CORESCHED_FUSE_DISABLE_AFBC_SZ (1)
+ #define CORESCHED_FUSE_DISABLE_REAL_SZ (1)
+ #define CORESCHED_FUSE_DISABLE_VP8_SZ (1)
+ REG32 CONFIG; /* (0x0038) Configuration (read-only) */
+ #define CORESCHED_CONFIG_NCORES (0)
+ #define CORESCHED_CONFIG_DATA_WIDTH (8) /* note: HSIZE notation */
+ #define CORESCHED_CONFIG_IDBITS (12)
+ #define CORESCHED_CONFIG_ENCODE (16)
+ #define CORESCHED_CONFIG_AFBC (17)
+ #define CORESCHED_CONFIG_REAL (18)
+ #define CORESCHED_CONFIG_VP8 (19)
+ #define CORESCHED_CONFIG_NCORES_SZ (8)
+ #define CORESCHED_CONFIG_DATA_WIDTH_SZ (4)
+ #define CORESCHED_CONFIG_IDBITS_SZ (4)
+ #define CORESCHED_CONFIG_ENCODE_SZ (1)
+ #define CORESCHED_CONFIG_AFBC_SZ (1)
+ #define CORESCHED_CONFIG_REAL_SZ (1)
+ #define CORESCHED_CONFIG_VP8_SZ (1)
+ REG32 TOPRAM_KB; /* (0x003c) TOPRAM size in kilobytes (read-only) */
+ REG32 PROTCTRL; /* (0x0040) Protection policy */
+ #define CORESCHED_PROTCTRL_CPS (0)
+ #define CORESCHED_PROTCTRL_L1R (1)
+ #define CORESCHED_PROTCTRL_L2R (3)
+ #define CORESCHED_PROTCTRL_PTMASK (8)
+ #define CORESCHED_PROTCTRL_NSAIDDEFAULT (12)
+ #define CORESCHED_PROTCTRL_NSAIDPUBLIC (16)
+ #define CORESCHED_PROTCTRL_NSAIDPROTECTED (20)
+ #define CORESCHED_PROTCTRL_NSAIDOUTBUF (24)
+ #define CORESCHED_PROTCTRL_NSAIDPRIVATE (28)
+ #define CORESCHED_PROTCTRL_CPS_SZ (1)
+ #define CORESCHED_PROTCTRL_L1R_SZ (2)
+ #define CORESCHED_PROTCTRL_L2R_SZ (2)
+ #define CORESCHED_PROTCTRL_PTMASK_SZ (4)
+ #define CORESCHED_PROTCTRL_NSAIDDEFAULT_SZ (4)
+ #define CORESCHED_PROTCTRL_NSAIDPUBLIC_SZ (4)
+ #define CORESCHED_PROTCTRL_NSAIDPROTECTED_SZ (4)
+ #define CORESCHED_PROTCTRL_NSAIDOUTBUF_SZ (4)
+ #define CORESCHED_PROTCTRL_NSAIDPRIVATE_SZ (4)
+ REG32 BUSCTRL; /* (0x0044) Bus control */
+ #define CORESCHED_BUSCTRL_SPLIT (0) /* Configuration for burst splitting */
+ #define CORESCHED_BUSCTRL_SPLIT_SZ (2)
+ #define CORESCHED_BUSCTRL_SPLIT_128 (0) /* Split bursts at 128-byte boundaries */
+ #define CORESCHED_BUSCTRL_SPLIT_256 (1) /* Split bursts at 256-byte boundaries */
+ #define CORESCHED_BUSCTRL_SPLIT_512 (2) /* Split bursts at 512-byte boundaries */
+ #define CORESCHED_BUSCTRL_SPLIT_1024 (3) /* Split bursts at 1024-byte boundaries */
+
+ REG32 _pad0[2];
+ REG32 RESET; /* (0x0050) Write '1' to reset the VE. Writing other values results in undefined behaviour */
+ REG32 _pad1[8*16-21];
+
+ struct _LSID_ENTRY /* (0x0200 + 0x0040*n) */
+ {
+ REG32 CTRL; /* (+0x00) [NCORES-1:0]: disallow-coremask, [11:8]: maxcores */
+ #define CORESCHED_LSID_CTRL_DISALLOW (0) /* NCORES */
+ #define CORESCHED_LSID_CTRL_MAXCORES (8) /* max no. of cores that may run this LSID. 0=infinite */
+ #define CORESCHED_LSID_CTRL_DISALLOW_SZ (8) /* actually only NCORES bits */
+ #define CORESCHED_LSID_CTRL_MAXCORES_SZ (4)
+
+ REG32 MMU_CTRL; /* (+0x04) Startup pagetable base */
+ #define CORESCHED_LSID_MMU_CTRL_TATTR (30) /* Index 0-3, bus attributes when reading L1 table */
+ #define CORESCHED_LSID_MMU_CTRL_TBASE (2) /* Upper 28 bits of 40-bit L1 page table address */
+ #define CORESCHED_LSID_MMU_CTRL_ENABLE (1) /* Should be set to '1' */
+ #define CORESCHED_LSID_MMU_CTRL_PROTECT (0) /* Should be set to '1' */
+ #define CORESCHED_LSID_MMU_CTRL_TATTR_SZ (2)
+ #define CORESCHED_LSID_MMU_CTRL_TBASE_SZ (28)
+ #define CORESCHED_LSID_MMU_CTRL_ENABLE_SZ (1)
+ #define CORESCHED_LSID_MMU_CTRL_PROTECT_SZ (1)
+
+ REG32 NPROT; /* (+0x08) 0=protected session, 1=non-protected session */
+ REG32 ALLOC; /* (+0x0c) Write 1 or 2 to attempt allocation, write 0 to deallocate.
+ Writing 1 allocates a non-protected session; writing 2 allocates a protected session.
+ If ALLOC=0 and 1 or 2 is written, the session is allocated.
+ This sets ALLOC=1, NPROT=PPROT[1], STREAMID=PADDR[19:16] and SCHED=0.
+ If ALLOC=1 or 2 and 0 is written, the session is deallocated.
+ This sets ALLOC=0, NPROT=1, STREAMID=0 and SCHED=0. */
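+ /* Example (illustrative only): a host allocates a protected session by
+ writing 2 to ALLOC, confirms the allocation took effect by reading
+ ALLOC back, and writes 0 to ALLOC once the session has terminated. */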
+ REG32 FLUSH_ALL; /* (+0x10) initiate mmu flush all for all cores */
+ REG32 SCHED; /* (+0x14) 1 if LSID is available for scheduling */
+ REG32 TERMINATE; /* (+0x18) Write 1 to reset all cores, poll 0 for completion */
+ REG32 IRQVE; /* (+0x1c) Signal IRQ to host (from VE) */
+ REG32 IRQHOST; /* (+0x20) Signal IRQ to VE (from host) */
+ REG32 INTSIG; /* (+0x24) Internal IRQ signals between cores */
+ REG32 _pad[1]; /* (+0x28) reserved */
+ REG32 STREAMID; /* (+0x2c) STREAMID value for protected pages */
+ REG32 BUSATTR[4]; /* (+0x30) Bus attribute registers, holding four different configurations
+ corresponding to the four ATTR values in the page tables */
+ #define CORESCHED_BUSATTR_ARCACHE (0) /* Value used for external AXI ARCACHE, default 0011 */
+ #define CORESCHED_BUSATTR_AWCACHE (4) /* Value used for external AXI AWCACHE, default 0011 */
+ #define CORESCHED_BUSATTR_ARDOMAIN (8) /* Value used for external AXI ARDOMAIN, default 00 */
+ #define CORESCHED_BUSATTR_AWDOMAIN (10) /* Value used for external AXI AWDOMAIN, default 00 */
+ #define CORESCHED_BUSATTR_ARCACHE_SZ (4)
+ #define CORESCHED_BUSATTR_AWCACHE_SZ (4)
+ #define CORESCHED_BUSATTR_ARDOMAIN_SZ (2)
+ #define CORESCHED_BUSATTR_AWDOMAIN_SZ (2)
+
+ } LSID[8];
+
+ REG32 _pad3[8192-256]; /* (0x0400) pad to 32 kB */
+
+ REG32 SESSIONRAM[4*SESSIONRAM_SIZE_PER_LSID/4]; /* (0x8000) NLSID * 4kB of session RAM */
+
+} tCS;
+
+#endif /* __FW_INCLUDE__MVE_CORESCHED_REG_H__ */
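The ALLOC handshake described in the LSID comments above lends itself to a small helper. A minimal sketch, assuming Linux MMIO accessors, with the register offset derived from the tCS layout (LSID entries at 0x0200 + 0x40*n, ALLOC at +0x0c); the helper names and the read-back check are illustrative assumptions, not part of this patch:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical offsets derived from the tCS layout above. */
    #define LSID_BASE(n)   (0x0200u + 0x40u * (n))
    #define LSID_ALLOC(n)  (LSID_BASE(n) + 0x0cu)

    /* Try to claim LSID n: write 1 (non-protected) or 2 (protected).
     * Per the register comment, the write only takes effect if ALLOC
     * was 0, so a non-zero read-back suggests the slot is now held. */
    static bool lsid_try_alloc(void __iomem *regs, unsigned int n, u32 prot)
    {
        writel(prot, regs + LSID_ALLOC(n));
        return readl(regs + LSID_ALLOC(n)) != 0;
    }

    /* Writing 0 deallocates and resets NPROT, STREAMID and SCHED. */
    static void lsid_free(void __iomem *regs, unsigned int n)
    {
        writel(0, regs + LSID_ALLOC(n));
    }
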
diff --git a/drivers/video/arm/v5xx/external/host_interface_v1/mve_protocol_kernel.h b/drivers/video/arm/v5xx/external/host_interface_v1/mve_protocol_kernel.h
new file mode 100644
index 000000000000..fd4494844f0a
--- /dev/null
+++ b/drivers/video/arm/v5xx/external/host_interface_v1/mve_protocol_kernel.h
@@ -0,0 +1,478 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __FW_INCLUDE__MVE_PROTOCOL_KERNEL_H__
+#define __FW_INCLUDE__MVE_PROTOCOL_KERNEL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * Virtual memory regions
+ *
+ * ..._ADDR_BEGIN gives the starting virtual address of the region,
+ * and ..._ADDR_END the (non-inclusive) ending address, such that
+ * the size of the region is obtained with the subtraction
+ * (..._ADDR_END - ..._ADDR_BEGIN).
+ */
+
+/* Memory region for first firmware instance */
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN (0x00000000u)
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_END (0x000FFFFFu + 1)
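With this convention a region's size is just END minus BEGIN; the first firmware instance region, for example, spans 0x00100000 bytes (1 MiB). A size macro is a natural companion (the macro name here is illustrative, not from the patch):

    /* Illustrative: region size from the BEGIN/END pair (1 MiB here). */
    #define MVE_MEM_REGION_FW_INSTANCE0_SIZE \
        (MVE_MEM_REGION_FW_INSTANCE0_ADDR_END - MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN)
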
+
+/*
+ * Areas for communication between host and MVE are placed in the interval
+ * 0x10079000 - 0x1007FFFF, see special defines further down.
+ */
+
+/* PROTECTED virtual memory region */
+#define MVE_MEM_REGION_PROTECTED_ADDR_BEGIN (0x20000000u)
+#define MVE_MEM_REGION_PROTECTED_ADDR_END (0x4FFFFFFFu + 1)
+
+/* FRAMEBUF virtual memory region */
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN (0x50000000u)
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_END (0x7FFFFFFFu + 1)
+
+/* Memory regions for other firmware instances */
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN (0x80000000u)
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_END (0x8FFFFFFFu + 1)
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN (0x90000000u)
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_END (0x9FFFFFFFu + 1)
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN (0xA0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_END (0xAFFFFFFFu + 1)
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN (0xB0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_END (0xBFFFFFFFu + 1)
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN (0xC0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_END (0xCFFFFFFFu + 1)
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN (0xD0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_END (0xDFFFFFFFu + 1)
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN (0xE0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_END (0xEFFFFFFFu + 1)
+/* 0xF0000000 - 0xFFFFFFFF is used internally in MVE */
+
+/* Communication queues between HOST/DRIVER and MVE */
+/*
+ * Address for queue for messages in to MVE,
+ * one struct mve_comm_area_host located here
+ */
+#define MVE_MSG_INQ 0x10079000u
+
+/*
+ * Address for queue for messages out from MVE,
+ * one struct mve_comm_area_mve located here
+ */
+#define MVE_MSG_OUTQ 0x1007A000u
+
+/*
+ * Address for queue for input buffers in to MVE,
+ * one struct mve_comm_area_host located here
+ */
+#define MVE_BUF_INQ 0x1007B000u
+
+/*
+ * Address for queue for input buffers returned from MVE,
+ * one struct mve_comm_area_mve located here
+ */
+#define MVE_BUF_INRQ 0x1007C000u
+
+/*
+ * Address for queue for output buffers in to MVE,
+ * one struct mve_comm_area_host located here
+ */
+#define MVE_BUF_OUTQ 0x1007D000u
+
+/*
+ * Address for queue for output buffers returned from MVE,
+ * one struct mve_comm_area_mve located here
+ */
+#define MVE_BUF_OUTRQ 0x1007E000u
+
+/* One struct mve_rpc_comunication_area located here */
+#define MVE_RPC_BUF 0x1007F000u
+
+/*
+ * One page of memory (4 kB) is used for each queue,
+ * so a maximum of 1024 words, but room is needed for some counters as well;
+ * see structs mve_comm_area_mve and mve_comm_area_host below.
+ */
+#define MVE_COMM_QUEUE_SIZE_IN_WORDS 1020
+
+/* This is the part of the message area that is written by host. */
+struct mve_comm_area_host {
+ volatile uint16_t out_rpos;
+ volatile uint16_t in_wpos;
+ volatile uint32_t reserved[3];
+ /*
+ * Queue of messages to MVE, each block of data prefixed with
+ * a mve_msg_header
+ */
+ volatile uint32_t in_data[MVE_COMM_QUEUE_SIZE_IN_WORDS];
+};
+
+/* This is the part of the message area that is written by MVE. */
+struct mve_comm_area_mve {
+ volatile uint16_t out_wpos;
+ volatile uint16_t in_rpos;
+ volatile uint32_t reserved[3];
+ /*
+ * Queue of messages to host, each block of data prefixed with
+ * a mve_msg_header
+ */
+ volatile uint32_t out_data[MVE_COMM_QUEUE_SIZE_IN_WORDS];
+};
+
+/* Data sent between host and mve is prefixed with a header */
+struct mve_msg_header {
+ uint16_t code;
+ uint16_t size;
+};
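Together, the two comm areas form a single-producer ring of 32-bit words: the host appends a mve_msg_header plus payload at in_wpos in mve_comm_area_host, and the firmware advances in_rpos in mve_comm_area_mve as it consumes. A minimal enqueue sketch, assuming the positions are word indices into in_data, that payloads are padded to whole words, and that the write position is published last; none of these helpers appear in the patch:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical host-side enqueue; returns 0 on success, -1 if full. */
    static int enqueue_msg(struct mve_comm_area_host *host,
                           const struct mve_comm_area_mve *mve,
                           uint16_t code, const uint32_t *payload, uint16_t bytes)
    {
        uint32_t words = (bytes + 3u) / 4u;               /* payload words */
        uint16_t wpos = host->in_wpos;
        uint32_t used = (uint32_t)(wpos - mve->in_rpos + MVE_COMM_QUEUE_SIZE_IN_WORDS)
                        % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        struct mve_msg_header hdr = { .code = code, .size = bytes };
        uint32_t hdr_word, i;

        if (used + words + 1 >= MVE_COMM_QUEUE_SIZE_IN_WORDS)
            return -1;                                    /* not enough room */

        memcpy(&hdr_word, &hdr, sizeof(hdr_word));        /* header fits one word */
        host->in_data[wpos] = hdr_word;
        wpos = (wpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        for (i = 0; i < words; i++) {
            host->in_data[wpos] = payload[i];
            wpos = (wpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        }
        host->in_wpos = wpos;                             /* publish last */
        return 0;
    }
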
+
+struct mve_job_command {
+ uint16_t cores;
+ uint16_t frames;
+ uint32_t flags;
+};
+
+/* Message codes for messages from host to MVE */
+enum MVE_MESSAGE_CODE {
+ MVE_MESSAGE_CODE_GO = 1,
+ MVE_MESSAGE_CODE_STOP = 2,
+ MVE_MESSAGE_CODE_INPUT_FLUSH = 4,
+ MVE_MESSAGE_CODE_OUTPUT_FLUSH = 5,
+ MVE_MESSAGE_CODE_SET_PARAMETER = 6,
+ MVE_MESSAGE_CODE_GET_PARAMETER = 7,
+ MVE_MESSAGE_CODE_SWITCH = 8,
+ MVE_MESSAGE_CODE_PING = 9,
+ MVE_MESSAGE_CODE_RESET = 10,
+ MVE_MESSAGE_CODE_SET_CONFIG = 11,
+ MVE_MESSAGE_CODE_GET_CONFIG = 12,
+ MVE_MESSAGE_CODE_DUMP = 13,
+ MVE_MESSAGE_CODE_JOB = 14
+};
+
+/* Response codes for responses from MVE to host */
+enum MVE_RESPONSE_CODE {
+ MVE_RESPONSE_CODE_INPUT = 1,
+ MVE_RESPONSE_CODE_OUTPUT = 2,
+ MVE_RESPONSE_CODE_PROCESSED = 3,
+ MVE_RESPONSE_CODE_EVENT = 4,
+ MVE_RESPONSE_CODE_SWITCHED_OUT = 5,
+ MVE_RESPONSE_CODE_SWITCHED_IN = 6,
+ MVE_RESPONSE_CODE_ERROR = 7,
+ MVE_RESPONSE_CODE_PONG = 8,
+ MVE_RESPONSE_CODE_STATE_CHANGE = 9,
+ MVE_RESPONSE_CODE_GET_PARAMETER_REPLY = 10,
+ MVE_RESPONSE_CODE_SET_PARAMETER_REPLY = 11,
+ MVE_RESPONSE_CODE_GET_CONFIG_REPLY = 12,
+ MVE_RESPONSE_CODE_SET_CONFIG_REPLY = 13,
+ MVE_RESPONSE_CODE_INPUT_FLUSHED = 14,
+ MVE_RESPONSE_CODE_OUTPUT_FLUSHED = 15,
+ MVE_RESPONSE_CODE_DUMP = 16,
+ MVE_RESPONSE_CODE_JOB_DEQUEUED = 17,
+ MVE_RESPONSE_CODE_IDLE = 18
+};
+
+/*
+ * #define, not enum because mve_userspace.h already introduced
+ * enum with these names.
+ */
+#define MVE_STATE_STOPPED 0
+#define MVE_STATE_RUNNING 2
+
+enum MVE_ERROR_CODE {
+ MVE_ERROR_ABORT = 1,
+ MVE_ERROR_OUT_OF_MEMORY = 2,
+ MVE_ERROR_ASSERT = 3,
+ MVE_ERROR_UNSUPPORTED = 4,
+ MVE_ERROR_INVALID_BUFFER = 6,
+ MVE_ERROR_CORRUPT_STREAM = 7,
+ MVE_ERROR_INVALID_STATE = 8,
+ MVE_ERROR_WATCHDOG = 9
+};
+
+#define MVE_IN_PORT 0
+#define MVE_OUT_PORT 1
+
+/* Buffer contains an interlaced frame */
+#define MVE_FLAGS_INTERLACE 0x1
+
+/* Bottom field first (interlaced only) */
+#define MVE_FLAGS_BOT_FIRST 0x2
+
+/* Buffer contains a frame or top field */
+#define MVE_FLAGS_TOP_PRESENT 0x4
+
+/* Buffer contains a bottom field */
+#define MVE_FLAGS_BOT_PRESENT 0x8
+
+/* Mask used for field presence */
+#define MVE_FLAGS_FIELD_MASK 0xC
+
+/* Frame is rotated 90 degrees */
+#define MVE_FLAGS_ROTATION_90 0x10
+
+/* Frame is rotated 180 degrees */
+#define MVE_FLAGS_ROTATION_180 0x20
+
+/* Frame is rotated 270 degrees */
+#define MVE_FLAGS_ROTATION_270 0x30
+
+/* Mask used for rotations in nMVEFlags */
+#define MVE_FLAGS_ROTATION_MASK 0x30
+
+/* Buffer contains a CRC map */
+#define MVE_FLAGS_CRC_PRESENT 0x40
+
+/* Enable the low latency encoding */
+#define MVE_FLAGS_LOW_LATENCY_ENABLE 0x80
+
+/* Qp specified for this frame */
+#define MVE_FLAGS_QP_PRESENT 0x10000
+
+/* Regions specified for this frame */
+#define MVE_FLAGS_ROI_PRESENT 0x20000
+
+/* bits 31..24 are number of output buffers that are complete and
+ * held by firmware in the DPB for reordering purposes
+ */
+#define MVE_FLAGS_DPB_FRAMES_SHIFT 24
+#define MVE_FLAGS_DPB_FRAMES_MASK 0xFF000000
+
+/* The planar buffer stores 3-plane decompressed video content */
+struct MVE_BUFFER_PLANAR {
+ /* Stride between rows for 0 and 180 deg rotation */
+ int16_t stride[3];
+
+ /* Stride between rows for 90 and 270 deg rotation */
+ int16_t stride_90[3];
+
+ /* Y,Cb,Cr top field */
+ uint32_t plane_top[3];
+
+ /* Y,Cb,Cr bottom field (interlace only) */
+ uint32_t plane_bot[3];
+};
+
+/*
+ * The AFBC buffer stores AFBC compressed content that is also used
+ * as the reference frame. Out of loop processing (crop, rotation,
+ * range reduction) must be supported by the user of this buffer and
+ * the parameters are signaled within the buffer descriptor below.
+ */
+struct MVE_BUFFER_AFBC {
+ uint32_t plane_top; /* Top field (interlace) or frame (progressive) */
+ uint32_t plane_bot; /* Bottom field (interlace only) */
+ uint16_t cropx; /* Luma x crop */
+ uint16_t cropy; /* Luma y crop */
+ uint8_t y_offset; /* Deblocking y offset of picture */
+ uint8_t rangemap; /* Packed VC-1 Luma and Chroma range map coefs */
+};
+
+/*
+ * The FRAME buffer stores the common information for PLANAR and AFBC buffers,
+ * and a union of PLANAR and AFBC specific information.
+ */
+struct MVE_BUFFER_FRAME {
+ /*
+ * Host buffer handle number
+ * WARNING: struct mve_core_buffer_header relies on having the same
+ * nHandle position as MVE_BUFFER
+ */
+ uint16_t nHandle;
+
+ /* Ensure alignment (even if forced packed) */
+ uint16_t nUnused0;
+
+ /* BufferFlags */
+ uint32_t nFlags;
+
+ /* User supplied tracking identifier (keep aligned) */
+ uint64_t nUserDataTag;
+
+ /* MVE sideband information */
+ uint32_t nMVEFlags;
+
+ /* Picture index in decode order */
+ uint32_t pic_index;
+
+ /* Decoded height may be smaller than display height */
+ uint16_t decoded_height;
+
+ /* Decoded width may be smaller than display width */
+ uint16_t decoded_width;
+ union {
+ struct MVE_BUFFER_PLANAR planar;
+ struct MVE_BUFFER_AFBC afbc;
+ } data;
+
+ /* Below fields are valid since Host interface spec v1.1 */
+
+ /* CRC map address top field or frame */
+ uint32_t crc_top;
+
+ /* CRC map bottom field */
+ uint32_t crc_bot;
+
+ /*
+ * Address of a 32-bit word that indicates how many luma pixel rows have
+ * been written by the host
+ */
+ uint32_t pRowsCounter;
+
+ /* Base Qp to use for encoding this picture */
+ uint8_t QP;
+
+ uint16_t Padding16[3];
+ uint32_t Padding;
+};
+
+/* The bitstream buffer stores a number of bitstream bytes */
+struct MVE_BUFFER_BITSTREAM {
+ /*
+ * Host buffer handle number
+ * WARNING: struct mve_core_buffer_header relies on having the same
+ * nHandle position as MVE_BUFFER
+ */
+ uint16_t nHandle;
+
+ /* Ensure alignment (even if forced packed) */
+ uint16_t nUnused0;
+
+ /* BufferFlags */
+ uint32_t nFlags;
+
+ /* User supplied tracking identifier (keep aligned) */
+ uint64_t nUserDataTag;
+
+ /* Length of allocated buffer */
+ uint32_t nAllocLen;
+
+ /* Byte offset from start to first byte */
+ uint32_t nOffset;
+
+ /* Number of bytes in the buffer */
+ uint32_t nFilledLen;
+
+ /* Pointer to buffer start */
+ uint32_t pBufferData;
+};
+
+/*
+ * Define a region in 16x16 units
+ *
+ * The region is macroblock positions (x,y) in the range
+ * mbx_left <= x < mbx_right
+ * mby_top <= y < mby_bottom
+ */
+struct MVE_FRAME_REGION {
+ uint16_t mbx_left; /* macroblock x left edge (inclusive) */
+ uint16_t mbx_right; /* macroblock x right edge (exclusive) */
+ uint16_t mby_top; /* macroblock y top edge (inclusive) */
+ uint16_t mby_bottom; /* macroblock y bottom edge (exclusive) */
+ int8_t qp_delta; /* QP delta value for this region */
+};
+
+/*
+ * The MVE_BUFFER_REGION buffer stores the information for FRAME buffers,
+ * and the information for regions of interest.
+ */
+#define MVE_MAX_FRAME_REGIONS 16
+struct MVE_BUFFER_REGION {
+ uint8_t nRegions; /* Number of regions */
+ struct MVE_FRAME_REGION region[MVE_MAX_FRAME_REGIONS];
+};
+
+union MVE_BUFFER {
+ struct MVE_BUFFER_FRAME frame;
+ struct MVE_BUFFER_BITSTREAM bitstream;
+ struct MVE_BUFFER_REGION region;
+};
+
+#define MVE_MSG_HEADER_CODE_BUFFER_BITSTREAM 0
+#define MVE_MSG_HEADER_CODE_BUFFER_FRAME 1
+#define MVE_MSG_HEADER_CODE_BUFFER_REGION 2
+#define MVE_MAX_MESSAGE_SIZE_IN_WORDS 192
+
+/* RPC */
+enum MVE_RPC_FUNCTION {
+ MVE_RPC_FUNCTION_DEBUG_PRINTF = 1,
+ MVE_RPC_FUNCTION_MEM_ALLOC = 2,
+ MVE_RPC_FUNCTION_MEM_RESIZE = 3,
+ MVE_RPC_FUNCTION_MEM_FREE = 4
+};
+
+/* Memory region selection */
+enum MVE_MEM_REGION {
+ MVE_MEM_REGION_PROTECTED = 0,
+ MVE_MEM_REGION_OUTBUF = 1,
+ MVE_MEM_REGION_FRAMEBUF = MVE_MEM_REGION_OUTBUF
+};
+
+enum MVE_RPC_STATE {
+ MVE_RPC_STATE_FREE = 0,
+ MVE_RPC_STATE_PARAM = 1,
+ MVE_RPC_STATE_RETURN = 2
+};
+
+#define MVE_RPC_AREA_SIZE_IN_WORDS 256
+#define MVE_RPC_DATA_SIZE_IN_WORDS (MVE_RPC_AREA_SIZE_IN_WORDS - 3)
+union rpc_params {
+ volatile uint32_t data[MVE_RPC_DATA_SIZE_IN_WORDS];
+
+ struct {
+ char string[MVE_RPC_DATA_SIZE_IN_WORDS * 4];
+ } debug_print;
+
+ struct {
+ uint32_t size;
+ uint32_t max_size;
+ uint8_t region;
+ } mem_alloc;
+
+ struct {
+ uint32_t ve_pointer;
+ uint32_t new_size;
+ } mem_resize;
+
+ struct {
+ uint32_t ve_pointer;
+ } mem_free;
+
+ struct {
+ uint32_t ve_pointer;
+ uint32_t n_pages;
+ uint32_t bitmap_size;
+ uint32_t bitmap[MVE_RPC_DATA_SIZE_IN_WORDS - 3];
+ } mem_map;
+};
+
+struct mve_rpc_comunication_area {
+ volatile uint32_t state;
+ volatile uint32_t call_id;
+ volatile uint32_t size;
+ union rpc_params params;
+};
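The RPC area is a small mailbox: the firmware fills call_id and params, sets state to MVE_RPC_STATE_PARAM, and waits; the host performs the call and flips state to MVE_RPC_STATE_RETURN. A host-side dispatch sketch under those assumptions; the return-value convention and who clears the slot back to FREE are not specified in this header, so the actions are left as comments:

    /* Hypothetical host-side RPC dispatch; real handling replaces the comments. */
    static void service_rpc(struct mve_rpc_comunication_area *rpc)
    {
        if (rpc->state != MVE_RPC_STATE_PARAM)
            return;                              /* nothing pending */

        switch (rpc->call_id) {
        case MVE_RPC_FUNCTION_DEBUG_PRINTF:
            /* log rpc->params.debug_print.string */
            break;
        case MVE_RPC_FUNCTION_MEM_ALLOC:
            /* allocate params.mem_alloc.size bytes in the selected region
             * and write the resulting VE address back into params (assumed
             * return convention) */
            break;
        case MVE_RPC_FUNCTION_MEM_RESIZE:
            /* grow params.mem_resize.ve_pointer to new_size */
            break;
        case MVE_RPC_FUNCTION_MEM_FREE:
            /* release params.mem_free.ve_pointer */
            break;
        }
        rpc->state = MVE_RPC_STATE_RETURN;       /* hand the result back */
    }
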
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FW_INCLUDE__MVE_PROTOCOL_KERNEL_H__ */
diff --git a/drivers/video/arm/v5xx/external/host_interface_v2/mve_protocol_def.h b/drivers/video/arm/v5xx/external/host_interface_v2/mve_protocol_def.h
new file mode 100644
index 000000000000..f86e06c28f85
--- /dev/null
+++ b/drivers/video/arm/v5xx/external/host_interface_v2/mve_protocol_def.h
@@ -0,0 +1,1574 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __FW_INCLUDE__MVE_PROTOCOL_DEF_H__
+#define __FW_INCLUDE__MVE_PROTOCOL_DEF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*****************************************************************************
+ *
+ * Communication protocol between the host/driver and the MVE firmware,
+ * the 'host interface'.
+ *
+ * MVE == MALI Video Engine
+ *
+ * Note: Message structs may be expanded in the future; the host should
+ * use the 'size' of the message to determine how many bytes to
+ * read from the message queue, rather than a sizeof(struct).
+ *
+ ****************************************************************************/
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*****************************************************************************
+ *
+ * Virtual memory regions
+ *
+ * ..._ADDR_BEGIN gives the starting virtual address of the region,
+ * and ..._ADDR_END the (non-inclusive) ending address, such that
+ * the size of the region is obtained with the subtraction
+ * (..._ADDR_END - ..._ADDR_BEGIN).
+ *
+ ****************************************************************************/
+
+/* Memory region for first firmware instance */
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN (0x00000000u)
+#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_END (0x000FFFFFu + 1)
+
+/*
+ * Areas for communication between host and MVE are placed in the interval
+ * 0x10079000 - 0x1007FFFF, see special defines further down.
+ */
+
+/* PROTECTED virtual memory region */
+#define MVE_MEM_REGION_PROTECTED_ADDR_BEGIN (0x20000000u)
+#define MVE_MEM_REGION_PROTECTED_ADDR_END (0x4FFFFFFFu + 1)
+
+/* FRAMEBUF virtual memory region */
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN (0x50000000u)
+#define MVE_MEM_REGION_FRAMEBUF_ADDR_END (0x7FFFFFFFu + 1)
+
+/* Memory regions for other firmware instances */
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN (0x80000000u)
+#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN (0x90000000u)
+#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN (0xA0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN (0xB0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN (0xC0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN (0xD0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN (0xE0000000u)
+#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_END \
+ (MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END)
+
+/* 0xF0000000 - 0xFFFFFFFF is used internally in MVE */
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*****************************************************************************
+ *
+ * Communication queues between HOST/DRIVER and MVE
+ *
+ * Address for queue for messages in to MVE,
+ * one struct mve_comm_area_host located here
+ *
+ ****************************************************************************/
+
+#define MVE_COMM_MSG_INQ_ADDR (0x10079000u)
+
+/* Address for queue for messages out from MVE,
+ * one struct mve_comm_area_mve located here
+ */
+#define MVE_COMM_MSG_OUTQ_ADDR (0x1007A000u)
+
+/* Address for queue for input buffers in to MVE,
+ * one struct mve_comm_area_host located here
+ */
+#define MVE_COMM_BUF_INQ_ADDR (0x1007B000u)
+
+/* Address for queue for input buffers returned from MVE,
+ * one struct mve_comm_area_mve located here
+ */
+#define MVE_COMM_BUF_INRQ_ADDR (0x1007C000u)
+
+/* Address for queue for output buffers in to MVE,
+ * one struct mve_comm_area_host located here
+ */
+#define MVE_COMM_BUF_OUTQ_ADDR (0x1007D000u)
+
+/* Address for queue for output buffers returned from MVE,
+ * one struct mve_comm_area_mve located here
+ */
+#define MVE_COMM_BUF_OUTRQ_ADDR (0x1007E000u)
+
+/* One struct mve_rpc_communication_area located here */
+#define MVE_COMM_RPC_ADDR (0x1007F000u)
+
+/* One page of memory (4 kB) is used for each queue,
+ * so a maximum of 1024 words, but room is needed for some counters as well;
+ * see structs mve_comm_area_mve and mve_comm_area_host below.
+ */
+#define MVE_COMM_QUEUE_SIZE_IN_WORDS 1020
+
+/* This is the part of the message area that is written by host. */
+struct mve_comm_area_host
+{
+ volatile uint16_t out_rpos;
+ volatile uint16_t in_wpos;
+ volatile uint32_t reserved[ 3 ];
+ /*
+ * Queue of messages to MVE, each block of data prefixed with
+ * a mve_msg_header
+ */
+ volatile uint32_t in_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ];
+};
+
+/* This is the part of the message area that is written by MVE. */
+struct mve_comm_area_mve
+{
+ volatile uint16_t out_wpos;
+ volatile uint16_t in_rpos;
+ volatile uint32_t reserved[ 3 ];
+ /*
+ * Queue of messages to host, each block of data prefixed with
+ * a mve_msg_header
+ */
+ volatile uint32_t out_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ];
+};
+
+#define MVE_RPC_AREA_SIZE_IN_WORDS 256
+#define MVE_RPC_DATA_SIZE_IN_WORDS (MVE_RPC_AREA_SIZE_IN_WORDS - 3)
+union mve_rpc_params
+{
+ volatile uint32_t data[ MVE_RPC_DATA_SIZE_IN_WORDS ];
+ struct
+ {
+ char string[ MVE_RPC_DATA_SIZE_IN_WORDS * 4 ];
+ } debug_print;
+ struct
+ {
+ uint32_t size;
+ uint32_t max_size;
+ uint8_t region; /* Memory region selection */
+ #define MVE_MEM_REGION_PROTECTED (0)
+ #define MVE_MEM_REGION_OUTBUF (1)
+ #define MVE_MEM_REGION_FRAMEBUF (MVE_MEM_REGION_OUTBUF)
+
+ /* The newly allocated memory must be placed
+ * on (at least) a 2^(log2_alignment) boundary
+ */
+ uint8_t log2_alignment;
+ } mem_alloc;
+ struct
+ {
+ uint32_t ve_pointer;
+ uint32_t new_size;
+ } mem_resize;
+ struct
+ {
+ uint32_t ve_pointer;
+ } mem_free;
+};
+
+struct mve_rpc_communication_area
+{
+ volatile uint32_t state;
+ #define MVE_RPC_STATE_FREE (0)
+ #define MVE_RPC_STATE_PARAM (1)
+ #define MVE_RPC_STATE_RETURN (2)
+ volatile uint32_t call_id;
+ #define MVE_RPC_FUNCTION_DEBUG_PRINTF (1)
+ #define MVE_RPC_FUNCTION_MEM_ALLOC (2)
+ #define MVE_RPC_FUNCTION_MEM_RESIZE (3)
+ #define MVE_RPC_FUNCTION_MEM_FREE (4)
+ volatile uint32_t size;
+ union mve_rpc_params params;
+};
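Note the v2 addition of log2_alignment to mem_alloc: the host must place the allocation on at least a 2^(log2_alignment) boundary. A one-line round-up sketch, with base standing in for a hypothetical next-free VE address in the chosen region:

    #include <stdint.h>

    /* Round base up to the 2^log2_alignment boundary required by mem_alloc. */
    static uint32_t mve_align_up(uint32_t base, uint8_t log2_alignment)
    {
        uint32_t align = UINT32_C(1) << log2_alignment;
        return (base + align - 1) & ~(align - 1);
    }
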
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************
+ *
+ * Message codes
+ *
+ *********************************************************************/
+
+/* Messages consist of one struct mve_msg_header, possibly followed
+ * by extra data.
+ */
+struct mve_msg_header
+{
+ uint16_t code;
+ /* REQUESTs are messages from the
+ * host/driver to the firmware: Code: Extra data in message: */
+ #define MVE_REQUEST_CODE_GO (1001) /* no extra data */
+ #define MVE_REQUEST_CODE_STOP (1002) /* no extra data */
+ #define MVE_REQUEST_CODE_INPUT_FLUSH (1003) /* no extra data */
+ #define MVE_REQUEST_CODE_OUTPUT_FLUSH (1004) /* no extra data */
+ #define MVE_REQUEST_CODE_SWITCH (1005) /* no extra data */
+ #define MVE_REQUEST_CODE_PING (1006) /* no extra data */
+ #define MVE_REQUEST_CODE_DUMP (1008) /* no extra data */
+ #define MVE_REQUEST_CODE_JOB (1009) /* struct mve_request_job */
+ #define MVE_REQUEST_CODE_SET_OPTION (1010) /* struct mve_request_set_option (variable size) */
+ #define MVE_REQUEST_CODE_RELEASE_REF_FRAME (1011) /* struct mve_request_release_ref_frame */
+ /* RESPONSEs are messages from
+ * the firmware to the host: */
+ #define MVE_RESPONSE_CODE_SWITCHED_IN (2001) /* struct mve_response_switched_in */
+ #define MVE_RESPONSE_CODE_SWITCHED_OUT (2002) /* struct mve_response_switched_out */
+ #define MVE_RESPONSE_CODE_SET_OPTION_CONFIRM (2003) /* no extra data */
+ #define MVE_RESPONSE_CODE_JOB_DEQUEUED (2004) /* struct mve_response_job_dequeued */
+ #define MVE_RESPONSE_CODE_INPUT (2005) /* no extra data, but buffer placed in buffer queue */
+ #define MVE_RESPONSE_CODE_OUTPUT (2006) /* no extra data, but buffer placed in buffer queue */
+ #define MVE_RESPONSE_CODE_INPUT_FLUSHED (2007) /* no extra data */
+ #define MVE_RESPONSE_CODE_OUTPUT_FLUSHED (2008) /* no extra data */
+ #define MVE_RESPONSE_CODE_PONG (2009) /* no extra data */
+ #define MVE_RESPONSE_CODE_ERROR (2010) /* struct mve_response_error */
+ #define MVE_RESPONSE_CODE_STATE_CHANGE (2011) /* struct mve_response_state_change */
+ #define MVE_RESPONSE_CODE_DUMP (2012) /* no extra data */
+ #define MVE_RESPONSE_CODE_IDLE (2013) /* no extra data */
+ #define MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM (2014) /* struct mve_response_frame_alloc_parameters */
+ #define MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS (2015) /* struct mve_response_sequence_parameters */
+ #define MVE_RESPONSE_CODE_EVENT (2016) /* struct mve_response_event (variable size) */
+ #define MVE_RESPONSE_CODE_SET_OPTION_FAIL (2017) /* struct mve_response_set_option_failed */
+ #define MVE_RESPONSE_CODE_REF_FRAME_UNUSED (2018) /* struct mve_response_ref_frame_unused */
+ /* BUFFERs are sent from host to firmware,
+ * and then return at some time: */
+ #define MVE_BUFFER_CODE_FRAME (3001) /* struct mve_buffer_frame */
+ #define MVE_BUFFER_CODE_BITSTREAM (3002) /* struct mve_buffer_bitstream */
+ #define MVE_BUFFER_CODE_PARAM (3003) /* struct mve_buffer_param */
+ #define MVE_BUFFER_CODE_GENERAL (3004) /* struct mve_buffer_general */
+
+ uint16_t size; /* size in bytes of trailing data, 0 if none */
+};
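Per the note at the top of this file, a reader must trust the size field rather than sizeof(struct), so newer firmware can append fields without breaking older hosts. A sketch of a size-driven dequeue, assuming word-index read positions as in the v1 queue; the function itself is illustrative:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical reader: returns the new read position. 'buf' must hold
     * at least hdr->size bytes, rounded up to whole words. */
    static uint32_t read_one_msg(const volatile uint32_t *queue, uint32_t rpos,
                                 struct mve_msg_header *hdr, uint32_t *buf)
    {
        uint32_t w = queue[rpos];
        uint32_t i, words;

        memcpy(hdr, &w, sizeof(*hdr));                    /* header is one word */
        rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        words = (hdr->size + 3u) / 4u;                    /* trust 'size', not sizeof */
        for (i = 0; i < words; i++) {
            buf[i] = queue[rpos];
            rpos = (rpos + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS;
        }
        return rpos;
    }
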
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************
+ *
+ * REQUESTs are messages from the host to the firmware
+ *
+ * Some of the MVE_REQUEST_CODE_ codes are followed by one of the
+ * structs below.
+ *
+ *********************************************************************/
+
+struct mve_request_job
+{
+ uint16_t cores; /* >= 1, number of cores to use, must match request to HW scheduler */
+ uint16_t frames; /* number of frames to process, zero means infinite */
+ uint32_t flags; /* can be zero */
+ #define MVE_JOB_FLAG_DISABLE_BNDMGR (0x01)
+};
+
+struct mve_request_set_option
+{
+ uint32_t index;
+ #define MVE_SET_OPT_INDEX_NALU_FORMAT (1) /* see arg, MVE_OPT_NALU_FORMAT_ */
+ #define MVE_SET_OPT_INDEX_STREAM_ESCAPING (2) /* arg=1 to enable (default), arg=0 to disable */
+ #define MVE_SET_OPT_INDEX_PROFILE_LEVEL (3) /* data.profile_level */
+ #define MVE_SET_OPT_INDEX_HOST_PROTOCOL_PRINTS (4) /* arg=1 to enable, arg=0 to disable (default) */
+ #define MVE_SET_OPT_INDEX_PROFILING (5) /* arg=1 to enable, arg=0 to disable (default) */
+ #define MVE_SET_OPT_INDEX_DISABLE_FEATURES (6) /* see arg, MVE_OPT_DISABLE_FEATURE_ */
+ #define MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS (7) /* decode, arg=1 to enable,
+ * arg=0 to disable (default) */
+ #define MVE_SET_OPT_INDEX_FRAME_REORDERING (8) /* decode, arg=1 to enable (default),
+ * arg=0 to disable */
+ #define MVE_SET_OPT_INDEX_INTBUF_SIZE (9) /* decode, arg = suggested limit of intermediate
+ * buffer allocation */
+ #define MVE_SET_OPT_INDEX_ENC_P_FRAMES (16) /* encode, arg = nPFrames */
+ #define MVE_SET_OPT_INDEX_ENC_B_FRAMES (17) /* encode, arg = number of B frames */
+ #define MVE_SET_OPT_INDEX_GOP_TYPE (18) /* encode, see arg */
+ #define MVE_SET_OPT_INDEX_INTRA_MB_REFRESH (19) /* encode, arg */
+ #define MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED (20) /* encode, arg = 0 or 1 */
+ #define MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC (21) /* encode, arg = 0 or 1 */
+ #define MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP (22) /* encode, arg = 0 or 1 */
+ #define MVE_SET_OPT_INDEX_TILES (23) /* encode, data.tiles */
+ #define MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE (24) /* HEVC encode, arg = 8 or 16,
+ * for sizes 8x8 or 16x16 */
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE (25) /* encode, see arg */
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE (26) /* encode, see arg */
+ #define MVE_SET_OPT_INDEX_ENC_H264_CABAC (27) /* encode, arg = 0 or 1, enabled by default */
+ #define MVE_SET_OPT_INDEX_ENC_SLICE_SPACING (28) /* encode, arg = suggested number of
+ * CTUs/macroblocks in a slice */
+ #define MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE (30) /* VP9 encode, see arg */
+ #define MVE_SET_OPT_INDEX_RESYNC_INTERVAL (31) /* JPEG encode, arg = nRestartInterval
+ * = nResynchMarkerSpacing */
+ #define MVE_SET_OPT_INDEX_HUFFMAN_TABLE (32) /* JPEG encode, data.huffman_table */
+ #define MVE_SET_OPT_INDEX_QUANT_TABLE (33) /* JPEG encode, data.quant_table */
+ #define MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES (34) /* encode debug, arg = 0 or 1,
+ * disabled by default */
+ #define MVE_SET_OPT_INDEX_MBINFO_OUTPUT (35) /* encode, arg=1 to enable,
+ * arg=0 to disable (default) */
+ #define MVE_SET_OPT_INDEX_MV_SEARCH_RANGE (36) /* encode, data.motion_vector_search_range */
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH (38) /* encode, data.bitdepth, to set other bitdepth
+ * of encoded stream than of input frames */
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT (39) /* encode, arg, to set other chroma format of
+ * encoded stream than of input frames */
+ #define MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE (40) /* encode, arg, select which way RGB is converted
+ * to YUV before encoding */
+ #define MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT (41) /* encode, arg, the maximum bandwidth limit defined
+ * by the host */
+ #define MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT (42) /* arg=timeout, arg=0 to disable */
+ #define MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC (43) /* encode, arg; 0,1,2 for H264; 0,1 for HEVC */
+ #define MVE_SET_OPT_INDEX_ENC_ADPTIVE_QUANTISATION (44) /* encode (h264 and hevc) */
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_P (45)
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_REF (46)
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_NONREF (47)
+ #define MVE_SET_OPT_INDEX_CB_QP_OFFSET (48)
+ #define MVE_SET_OPT_INDEX_CR_QP_OFFSET (49)
+ #define MVE_SET_OPT_INDEX_LAMBDA_SCALE (50) /* encode, data.lambda_scale */
+ #define MVE_SET_OPT_INDEX_ENC_MAX_NUM_CORES (51) /* maximum number of cores */
+
+ union
+ {
+ uint32_t arg; /* Most options only need a uint32_t as argument */
+ /* For option MVE_SET_OPT_INDEX_NALU_FORMAT, arg should
+ * be one of these: */
+ #define MVE_OPT_NALU_FORMAT_START_CODES (1)
+ #define MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER (2)
+ #define MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD (4)
+ #define MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD (8)
+ #define MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD (16)
+ /* For option MVE_SET_OPT_INDEX_GOP_TYPE, arg should
+ * be one of these: */
+ #define MVE_OPT_GOP_TYPE_BIDIRECTIONAL (1)
+ #define MVE_OPT_GOP_TYPE_LOW_DELAY (2)
+ #define MVE_OPT_GOP_TYPE_PYRAMID (3)
+ /* For option MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE,
+ * arg should be one of these: */
+ #define MVE_OPT_VP9_PROB_UPDATE_DISABLED (0)
+ #define MVE_OPT_VP9_PROB_UPDATE_IMPLICIT (1)
+ #define MVE_OPT_VP9_PROB_UPDATE_EXPLICIT (2)
+ /* For option MVE_SET_OPT_INDEX_DISABLE_FEATURES, arg
+ * should be a bitmask with features to disable: */
+ #define MVE_OPT_DISABLE_FEATURE_AFBC_COMP (0x00000001) /* VDMA AFBC Compression */
+ #define MVE_OPT_DISABLE_FEATURE_REF_CACHE (0x00000002) /* REF caching */
+ #define MVE_OPT_DISABLE_FEATURE_DEBLOCK (0x00000004) /* Deblocking */
+ #define MVE_OPT_DISABLE_FEATURE_SAO (0x00000008) /* SAO */
+ #define MVE_OPT_DISABLE_FEATURE_PIC_OUTPUT (0x00000020) /* Picture Output Removal */
+ #define MVE_OPT_DISABLE_FEATURE_PIPE (0x00000040) /* Pipe (i.e. parser-only) */
+ #define MVE_OPT_DISABLE_FEATURE_SLEEP (0x00000080) /* Clock gating
+ * (SOC_SYSCTRL.SLEEP bit) */
+ #define MVE_OPT_DISABLE_FEATURE_AFBC_LEGACY_REF (0x00000100) /* Enables tiled AFBC format in
+ * reference buffers. Ignored
+ * for decode AFBC output */
+ #define MVE_OPT_DISABLE_FEATURE_REF_PICS (0x00000400) /* Forces use of static 16x16
+ * reference pics */
+ #define MVE_OPT_DISABLE_FEATURE_CHNG_RECT_WA (0x00000800) /* Disables workaround */
+ /* For options MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE
+ * and MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE, arg
+ * should be a bitmask of MVE_MBTYPEs: */
+ #define MVE_MBTYPE_4x4 (0x00000001) /* 4x4 inter */
+ #define MVE_MBTYPE_4x8 (0x00000002) /* 4x8 inter */
+ #define MVE_MBTYPE_8x4 (0x00000004) /* 8x4 inter */
+ #define MVE_MBTYPE_8x8 (0x00000008) /* 8x8 inter */
+ #define MVE_MBTYPE_8x16 (0x00000010) /* 8x16 inter */
+ #define MVE_MBTYPE_16x8 (0x00000020) /* 16x8 inter */
+ #define MVE_MBTYPE_16x16 (0x00000040) /* 16x16 inter */
+ #define MVE_MBTYPE_PSKIP (0x00000080) /* P Skip inter */
+ #define MVE_MBTYPE_I4x4 (0x00000100) /* 4x4 intra */
+ #define MVE_MBTYPE_I8x8 (0x00000200) /* 8x8 intra */
+ #define MVE_MBTYPE_I16x16 (0x00000400) /* 16x16 intra */
+ #define MVE_MBTYPE_I32x32 (0x00000800) /* 32x32 intra */
+ #define MVE_MBTYPE_16x32 (0x00001000) /* 16x32 inter */
+ #define MVE_MBTYPE_32x16 (0x00002000) /* 32x16 inter */
+ #define MVE_MBTYPE_32x32 (0x00004000) /* 32x32 inter */
+ /* For option MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE,
+ * arg should be one of these: */
+ #define MVE_OPT_RGB_TO_YUV_BT601_STUDIO (0)
+ #define MVE_OPT_RGB_TO_YUV_BT601_FULL (1)
+ #define MVE_OPT_RGB_TO_YUV_BT709_STUDIO (2)
+ #define MVE_OPT_RGB_TO_YUV_BT709_FULL (3)
+ struct
+ {
+ uint16_t profile;
+ /* AVC/H.264 profiles */
+ #define MVE_OPT_PROFILE_H264_BASELINE (1)
+ #define MVE_OPT_PROFILE_H264_MAIN (2)
+ #define MVE_OPT_PROFILE_H264_HIGH (3)
+ /* HEVC/H.265 profiles */
+ #define MVE_OPT_PROFILE_H265_MAIN (1)
+ #define MVE_OPT_PROFILE_H265_MAIN_STILL (2)
+ #define MVE_OPT_PROFILE_H265_MAIN_INTRA (3)
+ #define MVE_OPT_PROFILE_H265_MAIN_10 (4)
+ /* VC-1 profiles */
+ #define MVE_OPT_PROFILE_VC1_SIMPLE (1)
+ #define MVE_OPT_PROFILE_VC1_MAIN (2)
+ #define MVE_OPT_PROFILE_VC1_ADVANCED (3)
+ /* VP8 profiles */
+ #define MVE_OPT_PROFILE_VP8_MAIN (1)
+ uint16_t level;
+ /* AVC/H.264 levels */
+ #define MVE_OPT_LEVEL_H264_1 (1)
+ #define MVE_OPT_LEVEL_H264_1b (2)
+ #define MVE_OPT_LEVEL_H264_11 (3)
+ #define MVE_OPT_LEVEL_H264_12 (4)
+ #define MVE_OPT_LEVEL_H264_13 (5)
+ #define MVE_OPT_LEVEL_H264_2 (6)
+ #define MVE_OPT_LEVEL_H264_21 (7)
+ #define MVE_OPT_LEVEL_H264_22 (8)
+ #define MVE_OPT_LEVEL_H264_3 (9)
+ #define MVE_OPT_LEVEL_H264_31 (10)
+ #define MVE_OPT_LEVEL_H264_32 (11)
+ #define MVE_OPT_LEVEL_H264_4 (12)
+ #define MVE_OPT_LEVEL_H264_41 (13)
+ #define MVE_OPT_LEVEL_H264_42 (14)
+ #define MVE_OPT_LEVEL_H264_5 (15)
+ #define MVE_OPT_LEVEL_H264_51 (16)
+ #define MVE_OPT_LEVEL_H264_52 (17)
+ #define MVE_OPT_LEVEL_H264_6 (18)
+ #define MVE_OPT_LEVEL_H264_61 (19)
+ #define MVE_OPT_LEVEL_H264_62 (20)
+ #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE (32)
+ /* The value (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + level_idc) encodes a user
+ * supplied level_idc value in the range 0 to 255 inclusive. If the host supplies
+ * a level_idc value by this method, the encoder encodes it in the bitstream
+ * without checking its validity.
+ */
+ #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_MAX (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + 255)
+ /* HEVC/H.265 levels */
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_1 (1)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_1 (2)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_2 (3)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_2 (4)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_21 (5)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_21 (6)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_3 (7)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_3 (8)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_31 (9)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_31 (10)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_4 (11)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_4 (12)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_41 (13)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_41 (14)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_5 (15)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_5 (16)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_51 (17)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_51 (18)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_52 (19)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_52 (20)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_6 (21)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_6 (22)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_61 (23)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_61 (24)
+ #define MVE_OPT_LEVEL_H265_MAIN_TIER_62 (25)
+ #define MVE_OPT_LEVEL_H265_HIGH_TIER_62 (26)
+ } profile_level;
+ struct
+ {
+ int32_t mv_search_range_x;
+ int32_t mv_search_range_y;
+ } motion_vector_search_range;
+ struct
+ {
+ uint32_t type;
+ #define MVE_OPT_HUFFMAN_TABLE_DC_LUMA (1)
+ #define MVE_OPT_HUFFMAN_TABLE_AC_LUMA (2)
+ #define MVE_OPT_HUFFMAN_TABLE_DC_CHROMA (3)
+ #define MVE_OPT_HUFFMAN_TABLE_AC_CHROMA (4)
+ uint8_t number_of_huffman_of_code_length[ 16 ];
+ uint8_t table[ 162 ]; /* 12 are used for DC, 162 for AC */
+ } huffman_table;
+ struct
+ {
+ uint32_t type;
+ #define MVE_OPT_QUANT_TABLE_LUMA (1)
+ #define MVE_OPT_QUANT_TABLE_CHROMA (2)
+ uint8_t matrix[ 64 ];
+ } quant_table;
+ struct
+ {
+ /* For HEVC, tile_cols must be zero. For VP9, tile_rows
+ * and tile_cols must be powers of 2. */
+ uint16_t tile_rows;
+ uint16_t tile_cols;
+ } tiles;
+ struct
+ {
+ uint16_t luma_bitdepth;
+ uint16_t chroma_bitdepth;
+ } bitdepth;
+ struct
+ {
+ /* Scale factors, and their square roots, for the lambda
+ * coefficients used by the encoder, in unsigned Q8 fixed-point
+ * format. Default (no scaling) is 1.0 (so 0x0100 in hex).
+ */
+ uint16_t lambda_scale_i_q8;
+ uint16_t lambda_scale_sqrt_i_q8;
+ uint16_t lambda_scale_p_q8;
+ uint16_t lambda_scale_sqrt_p_q8;
+ uint16_t lambda_scale_b_ref_q8;
+ uint16_t lambda_scale_sqrt_b_ref_q8;
+ uint16_t lambda_scale_b_nonref_q8;
+ uint16_t lambda_scale_sqrt_b_nonref_q8;
+ } lambda_scale;
+ } data;
+};
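Most options are set by pairing MVE_REQUEST_CODE_SET_OPTION with the index and only the used part of the union; the message is variable-size, so the header's size field covers just what is sent. A sketch for the NALU-format option; the container struct and the exact payload-size convention are assumptions:

    #include <stdint.h>

    struct set_option_msg {                  /* illustrative container */
        struct mve_msg_header hdr;
        struct mve_request_set_option opt;
    };

    static void build_nalu_format_option(struct set_option_msg *m)
    {
        m->opt.index = MVE_SET_OPT_INDEX_NALU_FORMAT;
        m->opt.data.arg = MVE_OPT_NALU_FORMAT_START_CODES;
        m->hdr.code = MVE_REQUEST_CODE_SET_OPTION;
        /* variable-size payload: index plus the single arg word */
        m->hdr.size = (uint16_t)(sizeof(m->opt.index) + sizeof(m->opt.data.arg));
    }
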
+
+struct mve_request_release_ref_frame
+{
+ /* Decode only: For a frame buffer that MVE has returned
+ * marked as _REF_FRAME, the host can send this message
+ * to ask the MVE to release the buffer as soon as it is
+ * no longer used as a reference. (Otherwise, in
+ * normal operation, the host would re-enqueue the buffer
+ * to the MVE when it has been displayed and can be over-
+ * written with a new frame.)
+ *
+ * Note: When a frame stops being used as a reference depends
+ * on the stream being decoded; there is no way to
+ * guarantee a short response time, and the response may not
+ * come until the end of the stream.
+ */
+ uint32_t buffer_address;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************
+ *
+ * RESPONSEs are messages from the firmware to the host
+ *
+ * Some of the MVE_RESPONSE_CODE_ codes are followed by one of the
+ * structs below.
+ *
+ *********************************************************************/
+
+/* Sent when firmware has booted.
+ */
+struct mve_response_switched_in
+{
+ uint32_t core;
+};
+
+/* Sent when last core in a session has switched out.
+ */
+struct mve_response_switched_out
+{
+ uint32_t core;
+ uint32_t reason;
+ uint32_t sub_reason;
+};
+
+/* Response confirming state transition after either GO or STOP
+ * command from host.
+ */
+struct mve_response_state_change
+{
+ uint32_t new_state;
+ #define MVE_STATE_STOPPED (0)
+ #define MVE_STATE_RUNNING (2)
+};
+
+/* Message sent when all cores in the session have dequeued a
+ * job from the firmware job queue.
+ */
+struct mve_response_job_dequeued
+{
+ uint32_t valid_job;
+};
+
+/* Fatal error message from the firmware; if sent, no further
+ * operation is possible.
+ */
+struct mve_response_error
+{
+ uint32_t error_code;
+ #define MVE_ERROR_ABORT (1)
+ #define MVE_ERROR_OUT_OF_MEMORY (2)
+ #define MVE_ERROR_ASSERT (3)
+ #define MVE_ERROR_UNSUPPORTED (4)
+ #define MVE_ERROR_INVALID_BUFFER (6)
+ #define MVE_ERROR_INVALID_STATE (8)
+ #define MVE_ERROR_WATCHDOG (9)
+
+ #define MVE_MAX_ERROR_MESSAGE_SIZE (128)
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
+};
+
+/* When a set-option succeeds, a confirmation message is
+ * sent, including the index-code for that particular option.
+ */
+struct mve_response_set_option_confirm
+{
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
+};
+
+/* If a set-option request fails, this message is returned.
+ * This is not a fatal error. The set-option had no effect,
+ * and the session is still alive.
+ * For example, trying to set an option with a too large
+ * or small parameter would result in this message.
+ * The included text string is meant for development and
+ * debugging purposes only.
+ * (When a set-option succeeds the set-option-confirm
+ * message code is sent instead.)
+ */
+struct mve_response_set_option_fail
+{
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
+};
+
+/* Decode only: This message is sent from MVE to the host so that it can
+ * allocate large enough output buffers. Output buffers that are too small
+ * will be returned to the host marked as 'rejected'.
+ */
+struct mve_response_frame_alloc_parameters
+{
+ /* Please note that the below information is a hint
+ * for what buffers to allocate, it does not say
+ * what actual resolution an output picture has.
+ */
+
+ /* To use if allocating PLANAR YUV output buffers: */
+ uint16_t planar_alloc_frame_width;
+ uint16_t planar_alloc_frame_height;
+
+ /* To use if allocating AFBC output buffers
+ * (if interlace, each field needs this size):
+ */
+ uint32_t afbc_alloc_bytes;
+
+ /* For situations where downscaled AFBC is supported,
+ * this number of bytes is needed for the downscaled frame.
+ */
+ uint32_t afbc_alloc_bytes_downscaled;
+
+ /* When the host allocates an AFBC frame buffer, it should normally set
+ * the afbc_width_in_superblocks to be at least this recommended value.
+ * Buffers with smaller values are likely to be returned rejected by the MVE.
+ * See also comments above for afbc_alloc_bytes and
+ * afbc_alloc_bytes_downscaled, they describe the situations where the
+ * different values are used.
+ */
+ uint16_t afbc_width_in_superblocks;
+ uint16_t afbc_width_in_superblocks_downscaled;
+
+ /* For PLANAR YUV output, every plane's address needs to be adjusted to get
+ * optimal AXI bursts when the pixel data is written; the values below may
+ * be used to calculate address offsets.
+ */
+ uint16_t cropx;
+ uint16_t cropy;
+
+ uint32_t mbinfo_alloc_bytes; /* Only for debugging */
+};
+
+/* Decode only: This message is sent from MVE to the host so that it can
+ * allocate suitable output buffers. The needed size of the buffer is sent
+ * in a separate message (above).
+ * When MVE sends the message below, it enters a waiting-state and will not
+ * make any progress until the host sends an output-flush command, upon
+ * which MVE will return all output buffers, followed by a message saying
+ * that the output has been flushed. Only then should the host start
+ * enqueueing new output buffers.
+ */
+struct mve_response_sequence_parameters
+{
+ /* Other stream parameters affecting buffer allocation,
+ * any change in these values will trigger a flush.
+ */
+ uint8_t interlace; /* 0 or 1 */
+ uint8_t chroma_format;
+ #define MVE_CHROMA_FORMAT_MONO (0x0)
+ #define MVE_CHROMA_FORMAT_420 (0x1)
+ #define MVE_CHROMA_FORMAT_422 (0x2)
+ #define MVE_CHROMA_FORMAT_440 (0x3)
+ #define MVE_CHROMA_FORMAT_ARGB (0x4)
+ uint8_t bitdepth_luma; /* 8, 9 or 10 */
+ uint8_t bitdepth_chroma; /* 8, 9 or 10 */
+ uint8_t num_buffers_planar; /* number of planar buffers needed */
+ uint8_t num_buffers_afbc; /* number of AFBC buffers needed; for
+ * AFBC output more buffers are needed
+ * (for planar output, the firmware
+ * will allocate extra memory via RPC)
+ */
+ uint8_t range_mapping_enabled; /* VC-1 AP specific feature, if enabled
+ * then AFBC buffers may need special
+ * filtering before they can be
+ * displayed correctly. If the host is
+ * not able to do that, then planar output
+ * should be used, for which MVE
+ * automatically performs the filtering.
+ */
+ uint8_t reserved0;
+};
+
+struct mve_response_ref_frame_unused
+{
+ /* Decode only: If requested by the host with the message
+ * MVE_REQUEST_CODE_RELEASE_REF_FRAME, the MVE will respond
+ * with this message when (if ever) the buffer is no longer
+ * used.
+ */
+ uint32_t unused_buffer_address;
+};
+
+
+/* This message is only for debugging and performance profiling.
+ * It is sent by the firmware if the corresponding option is enabled.
+ */
+struct mve_event_processed
+{
+ uint8_t pic_format;
+ uint8_t qp;
+ uint8_t pad0;
+ uint8_t pad1;
+ uint32_t parse_start_time; /* Timestamp, absolute time */
+ uint32_t parse_end_time; /* Timestamp, absolute time */
+ uint32_t parse_idle_time; /* Definition of idle here is waiting for in/out buffers or available RAM */
+
+ uint32_t pipe_start_time; /* Timestamp */
+ uint32_t pipe_end_time; /* Timestamp, end-start = process time. Idle time while in a frame is
+ * not measured. */
+ uint32_t pipe_idle_time; /* Always 0 in decode. */
+
+ uint32_t parser_coreid; /* Core used to parse this frame */
+ uint32_t pipe_coreid; /* Core used to pipe this frame */
+
+ uint32_t bitstream_bits; /* Number of bitstream bits used for this frame. */
+
+ uint32_t intermediate_buffer_size; /* Size of intermediate (mbinfo/residuals) buffer after this frame was
+ * parsed. */
+ uint32_t total_memory_allocated; /* After the frame was parsed, including reference frames. */
+
+ uint32_t bus_read_bytes; /* bus read bytes */
+ uint32_t bus_write_bytes; /* bus written bytes */
+
+ uint32_t afbc_bytes; /* afbc data transferred */
+
+ uint32_t slice0_end_time; /* Timestamp, absolute time */
+ uint32_t stream_start_time; /* Timestamp, absolute stream start time */
+ uint32_t stream_open_time; /* Timestamp, absolute stream open time */
+};
+
+/* This message is only for debugging; it is sent by the
+ * firmware if the corresponding option is enabled.
+ */
+struct mve_event_ref_frame
+{
+ uint32_t ref_addr; /* MVE virtual address of AFBC reference frame */
+ uint32_t ref_width; /* Width of display area in luma pixels */
+ uint32_t ref_height; /* Height of display area in luma pixels */
+ uint32_t ref_mb_width; /* Width in macroblocks */
+ uint32_t ref_mb_height; /* Height in macroblocks */
+ uint32_t ref_left_crop; /* Left crop in luma pixels */
+ uint32_t ref_top_crop; /* Top crop in luma pixels */
+ uint32_t ref_frame_size; /* Total AFBC frame size in bytes */
+ uint32_t ref_display_order;
+ uint16_t bit_width; /* bit width of the YUV either 8 or 10 */
+ uint16_t tiled_headers; /* AFBC format is tiled */
+};
+
+/* This message is only for debugging; it is sent by the firmware if event tracing
+ * is enabled.
+ */
+struct mve_event_trace_buffers
+{
+ uint16_t reserved;
+ uint8_t num_cores;
+ uint8_t rasc_mask;
+ #define MVE_MAX_TRACE_BUFFERS 40
+ /* this array will contain one buffer per rasc in rasc_mask for each core */
+ struct
+ {
+ uint32_t rasc_addr; /* rasc address of the buffer */
+ uint32_t size; /* size of the buffer in bytes */
+ } buffers[MVE_MAX_TRACE_BUFFERS];
+};
+
+/* 'Events' are informative messages, the host is not required to react in
+ * any particular way.
+ */
+struct mve_response_event
+{
+ uint32_t event_code;
+ #define MVE_EVENT_ERROR_STREAM_CORRUPT (1) /* message, text string */
+ #define MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED (2) /* message, text string */
+ #define MVE_EVENT_PROCESSED (3) /* struct mve_event_processed */
+ #define MVE_EVENT_REF_FRAME (4) /* struct mve_event_ref_frame */
+ #define MVE_EVENT_TRACE_BUFFERS (5) /* struct mve_event_trace_buffers */
+ union
+ {
+ struct mve_event_processed event_processed;
+ struct mve_event_ref_frame event_ref_frame;
+ struct mve_event_trace_buffers event_trace_buffers;
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
+ } event_data;
+};
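Since the active union member is selected by event_code, a host-side handler is a straightforward switch; the error events carry a text string whose length is implied by the message size. A dispatch sketch, with real handling left as comments:

    /* Hypothetical event dispatch; real handling replaces the comments. */
    static void handle_event(const struct mve_response_event *ev)
    {
        switch (ev->event_code) {
        case MVE_EVENT_ERROR_STREAM_CORRUPT:
        case MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED:
            /* ev->event_data.message: diagnostic text string */
            break;
        case MVE_EVENT_PROCESSED:
            /* ev->event_data.event_processed: per-frame profiling counters */
            break;
        case MVE_EVENT_REF_FRAME:
            /* ev->event_data.event_ref_frame: AFBC reference frame details */
            break;
        case MVE_EVENT_TRACE_BUFFERS:
            /* ev->event_data.event_trace_buffers: per-core trace buffers */
            break;
        }
    }
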
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************
+ *
+ * BUFFERs are sent both ways, from host to firmware and back again
+ *
+ * Each MVE_BUFFER_CODE_ code is followed by one of the structs
+ * below.
+ *
+ *********************************************************************/
+
+/* Flags in mve_buffer_frame::frame_flags:
+ * Set by whom? Meaning:
+ * DECODE: ENCODE:
+ * MVE_BUFFER_FRAME_FLAG_INTERLACE host - Buffer is interlaced (both top and
+ * bottom fields are allocated)
+ * MVE_BUFFER_FRAME_FLAG_BOT_FIRST fw - Bottom field should be displayed
+ * first (only if interlaced)
+ * MVE_BUFFER_FRAME_FLAG_TOP_PRESENT fw host Top field present (or full frame if
+ * not interlaced)
+ * MVE_BUFFER_FRAME_FLAG_BOT_PRESENT fw - Bottom present (only if interlaced)
+ *
+ * MVE_BUFFER_FRAME_FLAG_ROTATION_* host host Decode: MVE will rotate the output frame
+ * according to this setting.
+ * Encode: MVE will rotate the input frame
+ * according to this setting before
+ * encoding them.
+ * MVE_BUFFER_FRAME_FLAG_SCALING_MASK host - Output pictures should be downscaled
+ *
+ * MVE_BUFFER_FRAME_FLAG_MIRROR_* - host Input frame should be mirrored before encoding
+ *
+ * MVE_BUFFER_FRAME_FLAG_REJECTED fw - Buffer was too small, host should re-allocate
+ *
+ * MVE_BUFFER_FRAME_FLAG_CORRUPT fw - Frame contains visual corruption
+ *
+ * MVE_BUFFER_FRAME_FLAG_DECODE_ONLY fw - Frame should not be displayed
+ *
+ * MVE_BUFFER_FRAME_FLAG_REF_FRAME fw - Frame is used by MVE as reference, host must
+ * not change, just re-enqueue when displayed
+ * MVE_BUFFER_FRAME_FLAG_EOS fw host This is the last frame in the stream.
+ */
+
+/* mve_buffer_frame_planar stores uncompressed YUV pictures.
+ * ________________________________________
+ * | ^ | | ^
+ * |<-:--visible_frame_width---->| | :
+ * | : | | :
+ * | : | | :
+ * | visible_frame_height | | max_frame_height
+ * | : | | :
+ * | : | | :
+ * |__v__________________________| | :
+ * | | :
+ * |<-------------max_frame_width---------->| :
+ * |________________________________________| v
+ *
+ */
+struct mve_buffer_frame_planar
+{
+ /* Y,Cb,Cr top field */
+ uint32_t plane_top[ 3 ];
+
+ /* Y,Cb,Cr bottom field (interlace only) */
+ uint32_t plane_bot[ 3 ];
+
+ /* Stride between rows, in bytes */
+ int32_t stride[ 3 ];
+
+ /* Size of largest frame allowed to put in this buffer */
+ uint16_t max_frame_width;
+ uint16_t max_frame_height;
+};
+
+/* mve_buffer_frame_afbc stores AFBC compressed content that is also used
+ * as the reference frame. Out of loop processing (crop, rotation,
+ * range reduction) must be supported by the user of this buffer and
+ * the parameters are signaled within the buffer descriptor below.
+ * ________________________________________
+ * | ^ |
+ * | cropy |
+ * | v_____________________________ |
+ * |<-cropx->| ^ ||
+ * | |<-:--visible_frame_width---->||
+ * | | : ||
+ * | | : ||
+ * | | visible_frame_height ||
+ * | | : ||
+ * | | : ||
+ * | |__v__________________________||
+ * |________________________________________|
+ *
+ * <----- superblock_width --------------->
+ * * afbc_width_in_superblocks
+ *
+ * Note that the sizes and cropping values need not be multiples of 16.
+ *
+ * For interlaced streams, the values refer to a full frame,
+ * while the output is actually separated into fields. Thus for fields,
+ * cropy and visible_frame_height should be divided by two.
+ *
+ * For dual-downscaled AFBC output (not supported for interlace),
+ * the cropx, cropy, visible_frame_width and visible_frame_height
+ * should be divided by two for the downscaled plane.
+ */
+struct mve_buffer_frame_afbc
+{
+ uint32_t plane[ 2 ]; /* Addresses for up to two AFBC planes:
+ * Top and bottom fields for interlace,
+ * or standard and optional downscaled output. */
+ uint32_t alloc_bytes[ 2 ]; /* Size of allocation for each plane */
+ uint16_t cropx; /* Luma x crop */
+ uint16_t cropy; /* Luma y crop */
+ uint16_t afbc_width_in_superblocks[ 2 ]; /* Width of AFBC frame buffer, in units
+ * of superblock width (32 or 16).
+ * If dual-downscaled output is chosen,
+ * this width can be different for the
+ * two planes.
+ * For first plane:
+ * (cropx + frame_width)
+ * <= superblock_width * afbc_width...
+ */
+ uint32_t afbc_params; /* AFBC parameters */
+ #define MVE_BUFFER_FRAME_AFBC_TILED_BODY (0x00000001) /* Output body blocks should be tiled */
+ #define MVE_BUFFER_FRAME_AFBC_TILED_HEADER (0x00000002) /* Output headers should be tiled */
+ #define MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK (0x00000004) /* Super block is 32x8, default is 16x16,
+ * (only supported as input for encode) */
+ #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_8BIT (0x00000008) /* For downscaled AFBC plane: It shall
+ * be 8-bit, even if full-scale is 10-bit */
+ #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_420 (0x00000010) /* For downscaled AFBC plane: It shall
+ * be 4:2:0, even if full-scale is 4:2:2 */
+ #define MVE_BUFFER_FRAME_AFBC_STRIDE_SET_BY_MVE (0x00000020) /* Decode only: By default, the host should
+ * set the afbc_width_in_superblocks. If the
+ * value is zero, or if this bit is set, then
+ * the MVE sets an appropriate value. */
+
+};
+
+/*
+ * The FRAME buffer stores the common information for PLANAR and AFBC buffers,
+ * and a union of PLANAR and AFBC specific information.
+ */
+struct mve_buffer_frame
+{
+ /* For identification of the buffer, this is not changed by
+ * the firmware. */
+ uint64_t host_handle;
+
+ /* For matching input buffer with output buffers, the firmware
+ * copies these values between frame buffers and bitstream buffers. */
+ uint64_t user_data_tag;
+
+ /* Frame buffer flags, see commentary above */
+ uint32_t frame_flags;
+ #define MVE_BUFFER_FRAME_FLAG_INTERLACE (0x00000001)
+ #define MVE_BUFFER_FRAME_FLAG_BOT_FIRST (0x00000002)
+ #define MVE_BUFFER_FRAME_FLAG_TOP_PRESENT (0x00000004)
+ #define MVE_BUFFER_FRAME_FLAG_BOT_PRESENT (0x00000008)
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_90 (0x00000010)
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_180 (0x00000020)
+ #define MVE_BUFFER_FRAME_FLAG_ROTATION_270 (0x00000030)
+ #define MVE_BUFFER_FRAME_FLAG_SCALING_MASK (0x000000C0)
+ #define MVE_BUFFER_FRAME_FLAG_MIRROR_HORI (0x00000100)
+ #define MVE_BUFFER_FRAME_FLAG_MIRROR_VERT (0x00000200)
+ #define MVE_BUFFER_FRAME_FLAG_REJECTED (0x00001000)
+ #define MVE_BUFFER_FRAME_FLAG_CORRUPT (0x00002000)
+ #define MVE_BUFFER_FRAME_FLAG_DECODE_ONLY (0x00004000)
+ #define MVE_BUFFER_FRAME_FLAG_REF_FRAME (0x00008000)
+ #define MVE_BUFFER_FRAME_FLAG_EOS (0x00010000)
+
+ /* Height (in luma samples) of visible part of frame,
+ * may be smaller than allocated frame size. */
+ uint16_t visible_frame_height;
+
+ /* Width (in luma samples) of visible part of frame,
+ * may be smaller than allocated frame size. */
+ uint16_t visible_frame_width;
+
+ /* Color format of buffer */
+ uint16_t format;
+ /* format bitfield: */
+ #define MVE_FORMAT_BF_C (0) /* 3 bits, chroma subsampling */
+ #define MVE_FORMAT_BF_B (4) /* 4 bits, max bitdepth minus 8 */
+ #define MVE_FORMAT_BF_N (8) /* 2 bits, number of planes */
+ #define MVE_FORMAT_BF_V (12) /* 2 bits, format variant */
+ #define MVE_FORMAT_BF_A (15) /* 1 bit, AFBC bit */
+ /* formats: */
+ #define MVE_FORMAT_YUV420_AFBC_8 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_A) )
+
+ #define MVE_FORMAT_YUV420_AFBC_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_A) )
+
+ #define MVE_FORMAT_YUV422_AFBC_8 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_A) )
+
+ #define MVE_FORMAT_YUV422_AFBC_10 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_A) )
+
+ #define MVE_FORMAT_YUV420_I420 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 3 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV420_NV12 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 2 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV420_NV21 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 2 << MVE_FORMAT_BF_N) | \
+ ( 1 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV420_P010 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( (16 - 8) << MVE_FORMAT_BF_B) | \
+ ( 2 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV420_Y0L2 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV420_AQB1 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+ ( (10 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 1 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV422_YUY2 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV422_UYVY ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 1 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_YUV422_Y210 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+ ( (16 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_RGBA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 0 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_BGRA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 1 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_ARGB_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 2 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_ABGR_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+ ( ( 8 - 8) << MVE_FORMAT_BF_B) | \
+ ( 1 << MVE_FORMAT_BF_N) | \
+ ( 3 << MVE_FORMAT_BF_V) )
+
+ #define MVE_FORMAT_MBINFO (0x0001) /* only used for debugging */
+
+ #define MVE_FORMAT_UNUSED (0x0000)
+
+ uint16_t reserved0; /* force 'data' to be 4-byte aligned */
+
+ union
+ {
+ struct mve_buffer_frame_planar planar;
+ struct mve_buffer_frame_afbc afbc;
+ } data;
+
+ uint32_t reserved1; /* force size to be multiple of 8 bytes */
+};
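+
+/* Illustrative helpers (hypothetical, not part of the original protocol
+ * definition): decode a 'format' value using the MVE_FORMAT_BF_* offsets
+ * and the field widths documented above (3, 4, 2, 2 and 1 bits).
+ */
+static inline uint32_t mve_format_get_bitdepth(uint16_t format)
+{
+    /* the field stores "max bitdepth minus 8" in 4 bits */
+    return ((format >> MVE_FORMAT_BF_B) & 0xF) + 8;
+}
+
+static inline uint32_t mve_format_is_afbc(uint16_t format)
+{
+    /* single AFBC flag bit at the top of the bitfield */
+    return (format >> MVE_FORMAT_BF_A) & 0x1;
+}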
+
+/* The bitstream buffer stores a number of bitstream bytes */
+struct mve_buffer_bitstream
+{
+ /* For identification of the buffer, this is not changed by
+ * the firmware. */
+ uint64_t host_handle;
+
+ /* For matching input buffer with output buffers, the firmware
+ * copies these values between frame buffers and bitstream buffers. */
+ uint64_t user_data_tag;
+
+ /* BufferFlags */
+ uint32_t bitstream_flags;
+ #define MVE_BUFFER_BITSTREAM_FLAG_EOS (0x00000001)
+ #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME (0x00000010)
+ #define MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME (0x00000020)
+ #define MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG (0x00000080)
+ #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME (0x00000400)
+
+ /* Length of allocated buffer */
+ uint32_t bitstream_alloc_bytes;
+
+ /* Byte offset from start to first byte */
+ uint32_t bitstream_offset;
+
+ /* Number of bytes in the buffer */
+ uint32_t bitstream_filled_len;
+
+ /* Pointer to buffer start */
+ uint32_t bitstream_buf_addr;
+
+ /* Pad to force 8-byte alignment */
+ uint32_t reserved;
+};
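+
+/* A minimal sanity check (hypothetical helper, not part of the protocol):
+ * the filled region must lie within the allocation.
+ */
+static inline int mve_bitstream_region_fits(const struct mve_buffer_bitstream *b)
+{
+    /* widen to 64 bits so the sum cannot wrap around */
+    return (uint64_t)b->bitstream_offset + b->bitstream_filled_len <=
+           (uint64_t)b->bitstream_alloc_bytes;
+}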
+
+/*
+ * Define a region in 16x16 units
+ *
+ * The region is macroblock positions (x,y) in the range
+ * mbx_left <= x < mbx_right
+ * mby_top <= y < mby_bottom
+ */
+struct mve_buffer_param_region
+{
+ uint16_t mbx_left; /* macroblock x left edge (inclusive) */
+ uint16_t mbx_right; /* macroblock x right edge (exclusive) */
+ uint16_t mby_top; /* macroblock y top edge (inclusive) */
+ uint16_t mby_bottom; /* macroblock y bottom edge (exclusive) */
+ int16_t qp_delta; /* QP delta value for this region, this
+ * delta applies to QP values in the ranges:
+ * H264: 0-51
+ * HEVC: 0-51
+ * VP9: 0-255 */
+ uint16_t reserved;
+};
+
+/* input for encoder,
+ * the mve_buffer_param_regions buffer carries the region-of-interest
+ * information that applies to FRAME buffers.
+ */
+struct mve_buffer_param_regions
+{
+ uint8_t n_regions; /* Number of regions */
+ uint8_t reserved[ 3 ];
+ #define MVE_MAX_FRAME_REGIONS 16
+ struct mve_buffer_param_region region[ MVE_MAX_FRAME_REGIONS ];
+};
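+
+/* Example (illustrative): marking a single 32x32-pixel region of
+ * interest, i.e. macroblocks (0,0)..(1,1), with a QP offset of -4:
+ *
+ *   struct mve_buffer_param_regions r = {0};
+ *   r.n_regions = 1;
+ *   r.region[0].mbx_left = 0;  r.region[0].mbx_right = 2;
+ *   r.region[0].mby_top = 0;   r.region[0].mby_bottom = 2;
+ *   r.region[0].qp_delta = -4;
+ */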
+
+/* the block parameter record specifies the various properties of a quad
+ * (a 2x2 group of 16x16 blocks) */
+struct mve_block_param_record
+{
+ uint16_t qp_delta; /* Bitset of four 4-bit QP delta values for a quad */
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16 (0)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ (4)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16 (4)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (4)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16 (8)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ (4)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16 (12)
+ #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (4)
+
+ uint8_t force;
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_NONE (0x00)
+ #define MVE_BLOCK_PARAM_RECORD_FORCE_QP (0x01)
+
+ uint8_t reserved;
+};
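+
+/* Example (illustrative): packing four 4-bit QP deltas for a quad using
+ * the offsets above; tl, tr, bl and br are hypothetical values assumed
+ * to fit in 4 bits each:
+ *
+ *   struct mve_block_param_record bpr = {0};
+ *   bpr.qp_delta =
+ *       (tl << MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16) |
+ *       (tr << MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16) |
+ *       (bl << MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16) |
+ *       (br << MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16);
+ *   bpr.force = MVE_BLOCK_PARAM_RECORD_FORCE_QP;
+ */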
+
+/* block configuration uncompressed rows header. this determines the size
+ * of the uncompressed body. */
+struct mve_buffer_general_rows_uncomp_hdr
+{
+ uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */
+ uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */
+ uint8_t reserved[2];
+};
+
+/* block configuration uncompressed rows body. this structure contains an array
+ * of block parameter records whose length is (n_cols_minus1 + 1) * (n_rows_minus1 + 1)
+ * elements. therefore the allocation of this structure needs to be dynamic and
+ * a pointer to the allocated memory should then be assigned to the general
+ * purpose buffer data pointer
+ */
+struct mve_buffer_general_rows_uncomp_body
+{
+ /* the size of this array is variable and not necessarily equal to 1.
+ * therefore the sizeof operator should not be used
+ */
+ struct mve_block_param_record bpr[1];
+};
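+
+/* Illustrative: because bpr[] is variable length, the body must be sized
+ * dynamically from the header dimensions, e.g.
+ *
+ *   size_t n = ((size_t)hdr.n_cols_minus1 + 1) *
+ *              ((size_t)hdr.n_rows_minus1 + 1);
+ *   size_t bytes = n * sizeof(struct mve_block_param_record);
+ */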
+
+/* input for encoder, block level configurations.
+ * the row based block configurations can be defined in different formats. they
+ * are stored in the blk_cfgs union and identified by the blk_cfg_type member.
+ * these definitions consist of a header and body pair. the header part contains
+ * configuration information for the body. the body part describes the actual
+ * layout of the data buffer pointed to by the mve_buffer_general_hdr buffer_ptr.
+ */
+struct mve_buffer_general_block_configs
+{
+ uint8_t blk_cfg_type;
+ #define MVE_BLOCK_CONFIGS_TYPE_NONE (0x00)
+ #define MVE_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff)
+ uint8_t reserved[3];
+ union
+ {
+ struct mve_buffer_general_rows_uncomp_hdr rows_uncomp;
+ } blk_cfgs;
+};
+
+
+/* input for encoder */
+struct mve_buffer_param_qp
+{
+ /* QP (quantization parameter) for encode.
+ *
+ * When used to set fixed QP for encode, with rate control
+ * disabled, then the valid ranges are:
+ * H264: 0-51
+ * HEVC: 0-51
+ * VP8: 0-63
+ * VP9: 0-63
+ * Note: The QP must be set separately for I, P and B frames.
+ *
+ * But when this message is used with the regions-feature,
+ * then the valid ranges are the internal bitstream ranges:
+ * H264: 0-51
+ * HEVC: 0-51
+ * VP8: 0-127
+ * VP9: 0-255
+ */
+ int32_t qp;
+};
+
+/* output from decoder */
+struct mve_buffer_param_display_size
+{
+ uint16_t display_width;
+ uint16_t display_height;
+};
+
+/* output from decoder, colour information needed for hdr */
+struct mve_buffer_param_colour_description
+{
+ uint32_t flags;
+ #define MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1)
+ #define MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID (2)
+
+ uint8_t range; /* Unspecified=0, Limited=1, Full=2 */
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED (0)
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED (1)
+ #define MVE_BUFFER_PARAM_COLOUR_RANGE_FULL (2)
+
+ uint8_t colour_primaries; /* see hevc spec. E.3.1 */
+ uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */
+ uint8_t matrix_coeff; /* see hevc spec. E.3.1 */
+
+ uint16_t mastering_display_primaries_x[3]; /* see hevc spec. D.3.27 */
+ uint16_t mastering_display_primaries_y[3]; /* see hevc spec. D.3.27 */
+ uint16_t mastering_white_point_x; /* see hevc spec. D.3.27 */
+ uint16_t mastering_white_point_y; /* see hevc spec. D.3.27 */
+ uint32_t max_display_mastering_luminance; /* see hevc spec. D.3.27 */
+ uint32_t min_display_mastering_luminance; /* see hevc spec. D.3.27 */
+
+ uint32_t max_content_light_level; /* unused */
+ uint32_t avg_content_light_level; /* unused */
+};
+
+/* output from decoder see hevc spec. D.3.3 */
+struct mve_buffer_param_frame_field_info
+{
+ uint8_t pic_struct;
+ uint8_t source_scan_type;
+ uint8_t duplicate_flag;
+ uint8_t reserved;
+};
+
+/* output from decoder, VC-1 specific feature only relevant
+ * if using AFBC output
+ */
+struct mve_buffer_param_range_map
+{
+ uint8_t luma_map_enabled;
+ uint8_t luma_map_value;
+ uint8_t chroma_map_enabled;
+ uint8_t chroma_map_value;
+};
+
+/* input for encoder */
+struct mve_buffer_param_rate_control
+{
+ uint32_t rate_control_mode;
+ #define MVE_OPT_RATE_CONTROL_MODE_OFF (0)
+ #define MVE_OPT_RATE_CONTROL_MODE_STANDARD (1)
+ #define MVE_OPT_RATE_CONTROL_MODE_VARIABLE (2)
+ #define MVE_OPT_RATE_CONTROL_MODE_CONSTANT (3)
+ uint32_t target_bitrate; /* in bits per second */
+};
+
+/* input for encoder */
+struct mve_buffer_param_rate_control_qp_range
+{
+ int32_t qp_min;
+ int32_t qp_max;
+};
+
+/* input for encoder, see hevc spec. D.3.16 */
+struct mve_buffer_param_frame_packing
+{
+ uint32_t flags;
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_QUINCUNX_SAMPLING (1)
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_SPATIAL_FLIPPING (2)
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FRAME0_FLIPPED (4)
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FIELD_VIEWS (8)
+ #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_CURRENT_FRAME_IS_FRAME0 (16)
+
+ uint8_t frame_packing_arrangement_type;
+ uint8_t content_interpretation_type;
+
+ uint8_t frame0_grid_position_x;
+ uint8_t frame0_grid_position_y;
+ uint8_t frame1_grid_position_x;
+ uint8_t frame1_grid_position_y;
+
+ uint8_t reserved[ 2 ];
+};
+
+struct mve_buffer_param_rectangle
+{
+ uint16_t x_left; /* pixel x left edge (inclusive) */
+ uint16_t x_right; /* pixel x right edge (exclusive) */
+ uint16_t y_top; /* pixel y top edge (inclusive) */
+ uint16_t y_bottom; /* pixel y bottom edge (exclusive) */
+};
+
+/* input for encoder,
+ * indicates which parts of the source picture have changed.
+ * The encoder can (optionally) use this information to
+ * reduce memory bandwidth.
+ *
+ * n_rectangles=0 indicates the source picture is unchanged.
+ *
+ * This parameter only applies to the picture that immediately
+ * follows (and not to subsequent ones).
+ */
+struct mve_buffer_param_change_rectangles
+{
+ uint8_t n_rectangles; /* Number of rectangles */
+ uint8_t reserved[3];
+ #define MVE_MAX_FRAME_CHANGE_RECTANGLES 2
+ struct mve_buffer_param_rectangle rectangles[MVE_MAX_FRAME_CHANGE_RECTANGLES];
+};
+
+
+/* Parameters that are sent in the same communication channels
+ * as the buffers. A parameter applies to all subsequent buffers.
+ * Some types are only valid for decode, and some only for encode.
+ */
+struct mve_buffer_param
+{
+ uint32_t type; /* Extra data: */
+ #define MVE_BUFFER_PARAM_TYPE_QP (2) /* qp */
+ #define MVE_BUFFER_PARAM_TYPE_REGIONS (3) /* regions */
+ #define MVE_BUFFER_PARAM_TYPE_DISPLAY_SIZE (5) /* display_size */
+ #define MVE_BUFFER_PARAM_TYPE_RANGE_MAP (6) /* range_map */
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_RATE (9) /* arg, in frames per second, as a
+ * fixed point Q16 value, for example
+ * 0x001e0000 == 30.0 fps */
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL (10) /* rate_control */
+ #define MVE_BUFFER_PARAM_TYPE_QP_I (12) /* qp for I frames, when no rate control */
+ #define MVE_BUFFER_PARAM_TYPE_QP_P (13) /* qp for P frames, when no rate control */
+ #define MVE_BUFFER_PARAM_TYPE_QP_B (14) /* qp for B frames, when no rate control */
+ #define MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION (15) /* colour_description */
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_PACKING (16) /* frame_packing */
+ #define MVE_BUFFER_PARAM_TYPE_FRAME_FIELD_INFO (17) /* frame_field_info */
+ #define MVE_BUFFER_PARAM_TYPE_GOP_RESET (18) /* no extra data */
+ #define MVE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES (19) /* arg, number of output buffers that are
+ * complete and held by firmware in the
+ * DPB for reordering purposes.
+ * Valid after the next frame is output */
+ #define MVE_BUFFER_PARAM_TYPE_CHANGE_RECTANGLES (20) /* change rectangles */
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE (21) /* rate_control_qp_range */
+ #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE (23) /* arg */
+
+ union
+ {
+ uint32_t arg; /* some parameters only need a uint32_t as argument */
+ struct mve_buffer_param_qp qp;
+ struct mve_buffer_param_regions regions;
+ struct mve_buffer_param_display_size display_size;
+ struct mve_buffer_param_range_map range_map;
+ struct mve_buffer_param_rate_control rate_control;
+ struct mve_buffer_param_rate_control_qp_range rate_control_qp_range;
+ struct mve_buffer_param_colour_description colour_description;
+ struct mve_buffer_param_frame_packing frame_packing;
+ struct mve_buffer_param_frame_field_info frame_field_info;
+ struct mve_buffer_param_change_rectangles change_rectangles;
+ } data;
+};
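+
+/* Example (illustrative): requesting 29.97 fps through the Q16
+ * fixed-point 'arg' member:
+ *
+ *   struct mve_buffer_param p = {0};
+ *   p.type = MVE_BUFFER_PARAM_TYPE_FRAME_RATE;
+ *   p.data.arg = (uint32_t)(29.97 * 65536);    (== 0x001DF851)
+ */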
+
+
+/* The general purpose buffer header stores the common fields of an
+ * mve_buffer_general. it holds the pointer to the data buffer that contains
+ * the general purpose data
+ */
+struct mve_buffer_general_hdr
+{
+ /* For identification of the buffer, this is not changed by the firmware. */
+ uint64_t host_handle;
+
+ /* this depends upon the type of the general purpose buffer */
+ uint64_t user_data_tag;
+
+ /* pointer to the buffer containing the general purpose data. the format
+ * of this data is defined by the configuration in the mve_buffer_general */
+ uint32_t buffer_ptr;
+
+ /* size of the buffer pointed to by buffer_ptr */
+ uint32_t buffer_size;
+
+ /* selects the type of semantics to use for the general purpose buffer. it
+ * tags (or discriminates) the union config member in mve_buffer_general
+ */
+ uint16_t type; /* Extra data: */
+ #define MVE_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */
+
+ /* size of the mve_buffer_general config member */
+ uint16_t config_size;
+
+ /* pad to force 8-byte alignment */
+ uint32_t reserved;
+};
+
+/* The general purpose buffer consists of a header and a configuration. The
+ * header contains a pointer to a buffer whose format is described by the
+ * configuration. The type of configuration is indicated by the type value in
+ * the header. N.B. In use, the size of the config part of this structure is
+ * defined in the header and is not necessarily equal to that returned by the
+ * sizeof() operator. This allows a more size efficient communication between
+ * the host and firmware.
+ */
+struct mve_buffer_general
+{
+ struct mve_buffer_general_hdr header;
+
+ /* used to describe the configuration of the general purpose buffer data
+ * pointed to by buffer_ptr
+ */
+ union
+ {
+ struct mve_buffer_general_block_configs block_configs;
+ } config;
+};
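+
+/* Illustrative: when transmitted, only header.config_size bytes of the
+ * config union are sent, so the message occupies
+ *
+ *   sizeof(struct mve_buffer_general_hdr) + g.header.config_size
+ *
+ * bytes rather than sizeof(struct mve_buffer_general).
+ */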
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ */
diff --git a/drivers/video/arm/v5xx/resource/Makefile b/drivers/video/arm/v5xx/resource/Makefile
new file mode 100644
index 000000000000..0aeb05fb06d5
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/Makefile
@@ -0,0 +1,75 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+obj-$(CONFIG_MALI_VPU) := mve_rsrc.o
+
+mve_rsrc-y := mve_rsrc_driver.o \
+ mve_rsrc_mem_frontend.o \
+ mve_rsrc_mem_cache.o \
+ mve_rsrc_mem_backend.o \
+ mve_rsrc_register.o \
+ mve_rsrc_irq.o \
+ mve_rsrc_scheduler.o \
+ mve_rsrc_dvfs.o \
+ mve_rsrc_pm.o \
+ mve_rsrc_circular_buffer.o \
+ mve_rsrc_mem_dma.o \
+ mve_rsrc_mem_dma_uncached.o \
+ mve_rsrc_log.o \
+ machine/mve_config.o
+
+# Only build machine/board-vexpress.c when building for one of the FPGA
+# platforms or the Juno development board
+mve_rsrc-$(CONFIG_MALI_VPU_VEX6) += machine/board-vexpress.o
+mve_rsrc-$(CONFIG_MALI_VPU_VEX7) += machine/board-vexpress.o
+mve_rsrc-$(CONFIG_MALI_VPU_JUNO) += machine/board-vexpress.o
+
+ccflags-y = -I$(src)/../external/
+ccflags-$(CONFIG_MALI_VPU_DEVICE_TREE) += -DDEVICETREE
+ccflags-$(CONFIG_MALI_VPU_ENABLE_FTRACE) += -DMVE_LOG_FTRACE_ENABLE
+ccflags-$(CONFIG_MALI_VPU_ENABLE_ALOG) += -DMVE_LOG_ALOG_ENABLE
+ccflags-$(CONFIG_MALI_VPU_ENABLE_PRINT_FILE) += -DMVE_LOG_PRINT_FILE_ENABLE
+ccflags-$(CONFIG_MALI_VPU_ENABLE_DVFS_SIM) += -DENABLE_DVFS_FREQ_SIM
+
+# If debug is enabled, disable function inlining to allow ftrace to give a more detailed picture of the executed functions
+ccflags-$(CONFIG_MALI_VPU_DEBUG) += -D_DEBUG -fno-inline
+ccflags-$(CONFIG_MALI_VPU_UNIT) += -DUNIT
+ccflags-$(CONFIG_MALI_VPU_POWER_SAVING_MODE_CLOCK_GATING) += -DSCHEDULER_MODE_IDLE_SWITCHOUT -DDISABLE_DVFS
+
+ifeq ($(CONFIG_MALI_VPU_TRACKMEM), y)
+ ccflags-y += -DMVE_MEM_DBG_TRACKMEM=1
+ MVE_MEM_DEBUG=y
+else
+ ccflags-y += -DMVE_MEM_DBG_TRACKMEM=0
+endif
+
+ifeq ($(CONFIG_MALI_VPU_RESFAIL), y)
+ ccflags-y += -DMVE_MEM_DBG_RESFAIL=1
+ MVE_MEM_DEBUG=y
+else
+ ccflags-y += -DMVE_MEM_DBG_RESFAIL=0
+endif
+
+ifeq ($(MVE_MEM_DEBUG), y)
+ ccflags-y += -DMVE_MEM_DBG_SUPPORT=1
+else
+ ccflags-y += -DMVE_MEM_DBG_SUPPORT=0
+endif
+
+# Always switch out on idleness
+ccflags-y += -DSCHEDULER_MODE_IDLE_SWITCHOUT=1
+
+ccflags-$(CONFIG_MALI_VPU_VEX6) += -DHW=0
+ccflags-$(CONFIG_MALI_VPU_VEX7) += -DHW=1
+ccflags-$(CONFIG_MALI_VPU_JUNO) += -DHW=2
diff --git a/drivers/video/arm/v5xx/resource/docs/Doxyfile b/drivers/video/arm/v5xx/resource/docs/Doxyfile
new file mode 100644
index 000000000000..06fa8d68397c
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/docs/Doxyfile
@@ -0,0 +1,122 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+##############################################################################
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections etc, use the module
+# name as a prefix e.g. gles_my_alias.
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT += ../../kernel/drivers/video/arm/v5xx/resource
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE += ../../omx_components/prototype_decoder
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH += ../..
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS += .
diff --git a/drivers/video/arm/v5xx/resource/machine/board-vexpress.c b/drivers/video/arm/v5xx/resource/machine/board-vexpress.c
new file mode 100644
index 000000000000..5fed9faf8433
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/board-vexpress.c
@@ -0,0 +1,433 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#define VEXP6 0
+#define VEXP7 1
+#define JUNO 2
+
+#if (VEXP6 == HW) || (VEXP7 == HW) || (JUNO == HW)
+#include "../mve_rsrc_log.h"
+
+#include "mve_power_management.h"
+#include "mve_dvfs.h"
+#include "mve_config.h"
+
+#if defined(ENABLE_DVFS_FREQ_SIM)
+#include "../mve_rsrc_register.h"
+
+#if defined(EMULATOR)
+#include "emulator_userspace.h"
+#else
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/printk.h>
+#include <linux/jiffies.h>
+#endif /* EMULATOR */
+
+#endif /* ENABLE_DVFS_FREQ_SIM */
+
+/* BEGIN: Move this to mach/arm/board-... */
+
+/* Called by the driver when power to the MVE needs to be turned on */
+static int power_on(void)
+{
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "Enable power.");
+ /* No need to do anything on the FPGA */
+ return 0;
+}
+
+/* Called by the driver when MVE no longer needs to be powered */
+static void power_off(void)
+{
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "Disable power.");
+ /* No need to do anything on the FPGA */
+}
+
+static struct mve_pm_callback_conf pm_callbacks =
+{
+ .power_off_callback = power_off,
+ .power_on_callback = power_on,
+};
+
+/* Callback function for retrieving AXI bus memory access settings */
+static uint32_t bus_attributes_callback(int attribute)
+{
+ uint32_t ret = 0;
+
+ switch (attribute)
+ {
+ case 0:
+ /* Intentional fallthrough */
+ case 1:
+ ret = 0;
+ break;
+ case 2:
+ /* Intentional fallthrough */
+ case 3:
+ ret = 0x33;
+ break;
+ default:
+ WARN_ON(true);
+ }
+
+ return ret;
+}
+
+/**
+ * Currently the only supported value for DVFS_FREQ_MAX is 100.
+ * This is to simplify calculation of sleep duration in dvfs_thread().
+ */
+#define DVFS_FREQ_MAX (100)
+static uint32_t dvfs_freq = DVFS_FREQ_MAX;
+
+#if defined(ENABLE_DVFS_FREQ_SIM)
+static bool dvfs_is_started = false;
+
+static struct task_struct *dvfs_task = NULL;
+static struct semaphore dvfs_sem;
+static struct completion dvfs_ping;
+static int dvfs_ncores;
+
+/**
+ * Pause all MVE cores.
+ *
+ * This function tries to pause all MVE cores. It will try to make sure
+ * all cores are on pause before it returns, but it will not wait forever
+ * and will return anyway after some time.
+ */
+static void dvfs_cores_pause(void)
+{
+ tCS *regs;
+ uint32_t req;
+ uint32_t val;
+ int retries = 1000;
+
+ req = (1 << dvfs_ncores) - 1;
+
+ regs = mver_reg_get_coresched_bank();
+ mver_reg_write32(&regs->CLKPAUSE, req);
+ do
+ {
+ val = mver_reg_read32(&regs->CLKIDLE);
+ }
+ while (req != val && retries--);
+ mver_reg_put_coresched_bank(&regs);
+
+ if (req != val)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_WARNING,
+ "Cannot pause all cores. dvfs_ncores=%d, req=%08x, val=%08x",
+ dvfs_ncores, req, val);
+ }
+}
+
+/**
+ * Resume MVE cores from pause.
+ *
+ * This function will resume MVE cores from pause and return immediately.
+ */
+static void dvfs_cores_resume(void)
+{
+ tCS *regs;
+
+ regs = mver_reg_get_coresched_bank();
+ mver_reg_write32(&regs->CLKPAUSE, 0);
+ mver_reg_put_coresched_bank(&regs);
+}
+
+/**
+ * DVFS clock simulation thread.
+ *
+ * This function is started in a separate kernel thread. It puts MVE cores on
+ * pause for a fraction of a millisecond. Exact sleep time is determined from
+ * requested clock frequency.
+ */
+static int dvfs_thread(void *v)
+{
+ int sem_failed;
+ uint32_t freq;
+ unsigned long timeout = msecs_to_jiffies(5000);
+
+ while (!kthread_should_stop())
+ {
+ sem_failed = down_interruptible(&dvfs_sem);
+ freq = dvfs_freq;
+ if (!sem_failed)
+ {
+ up(&dvfs_sem);
+ }
+
+ if (DVFS_FREQ_MAX == freq)
+ {
+ wait_for_completion_interruptible_timeout(&dvfs_ping, timeout);
+ }
+ else
+ {
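+ /* Simulate running at freq percent of the maximum over a 1 ms
+ * period: pause the cores for 10 us per missing percentage point
+ * (capped at 980 us so the cores always get some run time), then
+ * let them run for the remainder of the period. */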
+ unsigned int pause_us = 1000 - 10 * freq;
+ if (pause_us > 980)
+ {
+ pause_us = 980;
+ }
+ dvfs_cores_pause();
+ usleep_range(pause_us, pause_us + 20);
+ dvfs_cores_resume();
+ usleep_range(1000 - pause_us, 1000 - pause_us + 20);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Retrieve count of MVE cores.
+ */
+static int dvfs_get_ncores(void)
+{
+ tCS *regs;
+ uint32_t ncores;
+
+ regs = mver_reg_get_coresched_bank();
+ ncores = mver_reg_read32(&regs->NCORES);
+ mver_reg_put_coresched_bank(&regs);
+
+ return ncores;
+}
+
+/**
+ * Set requested clock frequency for clock simulation.
+ */
+static void dvfs_set_freq(const uint32_t freq)
+{
+ bool dvfs_thread_sleeps_now = false;
+ int sem_failed = down_interruptible(&dvfs_sem);
+
+ if (dvfs_freq >= DVFS_FREQ_MAX)
+ {
+ dvfs_thread_sleeps_now = true;
+ }
+
+ if (freq > DVFS_FREQ_MAX)
+ {
+ dvfs_freq = DVFS_FREQ_MAX;
+ }
+ else
+ {
+ dvfs_freq = freq;
+ }
+
+ if (dvfs_thread_sleeps_now && dvfs_freq < DVFS_FREQ_MAX)
+ {
+ complete(&dvfs_ping);
+ }
+
+ if (!sem_failed)
+ {
+ up(&dvfs_sem);
+ }
+}
+
+/**
+ * Retrieve simulated clock frequency value.
+ */
+static uint32_t dvfs_get_freq(void)
+{
+ uint32_t freq;
+ int sem_failed = down_interruptible(&dvfs_sem);
+
+ freq = dvfs_freq;
+
+ if (!sem_failed)
+ {
+ up(&dvfs_sem);
+ }
+
+ return freq;
+}
+
+/**
+ * Initialize clock frequency simulation.
+ *
+ * This function will simply exit if clock simulation was already initialized.
+ */
+void dvfs_sim_init(void)
+{
+ if (dvfs_is_started)
+ {
+ return;
+ }
+
+ sema_init(&dvfs_sem, 1);
+ init_completion(&dvfs_ping);
+ dvfs_ncores = dvfs_get_ncores();
+ dvfs_task = kthread_run(dvfs_thread, NULL, "dvfs");
+
+ dvfs_is_started = true;
+}
+#endif /* ENABLE_DVFS_FREQ_SIM */
+
+static void enable_clock(void)
+{
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "Enable clock.");
+}
+
+static void disable_clock(void)
+{
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "Disable clock.");
+}
+
+static void set_clock_rate(uint32_t clk_rate)
+{
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_DEBUG, "Setting clock rate. rate=%u (%u)", clk_rate, dvfs_freq);
+#if defined(ENABLE_DVFS_FREQ_SIM)
+ dvfs_sim_init();
+ dvfs_set_freq(clk_rate);
+#else
+ dvfs_freq = clk_rate;
+#endif
+}
+
+static uint32_t get_clock_rate(void)
+{
+#if defined(ENABLE_DVFS_FREQ_SIM)
+ dvfs_sim_init();
+ return dvfs_get_freq();
+#else
+ return dvfs_freq;
+#endif
+}
+
+static uint32_t get_max_clock_rate(void)
+{
+ return DVFS_FREQ_MAX;
+}
+
+static void stop(void)
+{
+#if defined(ENABLE_DVFS_FREQ_SIM)
+ if (dvfs_is_started)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_DEBUG, "Stopping thread.");
+ kthread_stop(dvfs_task);
+ dvfs_is_started = false;
+ }
+#endif
+}
+
+static struct mve_dvfs_callback_conf dvfs_callbacks =
+{
+ .enable_clock = enable_clock,
+ .disable_clock = disable_clock,
+ .set_rate = set_clock_rate,
+ .get_rate = get_clock_rate,
+ .get_max_rate = get_max_clock_rate,
+ .stop = stop,
+};
+
+static struct mve_config_attribute attributes[] =
+{
+ {
+ MVE_CONFIG_DEVICE_ATTR_POWER_CALLBACKS,
+ (uint32_t *)&pm_callbacks
+ },
+ {
+ MVE_CONFIG_DEVICE_ATTR_BUS_ATTRIBUTES,
+ (uint32_t *)bus_attributes_callback
+ },
+ {
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS,
+ (uint32_t *)&dvfs_callbacks
+ },
+ {
+ MVE_CONFIG_DEVICE_ATTR_END,
+ NULL
+ }
+};
+
+struct mve_config_attribute *mve_device_get_config(void)
+{
+ return attributes;
+}
+#endif /* (VEXP6 == HW) || (VEXP7 == HW) || (JUNO == HW) */
+
+#ifndef DEVICETREE
+/* Driver built without device tree (DT) support */
+
+#include <linux/dma-mapping.h>
+
+#if (VEXP7 == HW)
+#define MVE_CORE_BASE 0xFC030000
+#define MVE_CORE_BASE_SIZE 0xFFFF
+#define MVE_IRQ 70
+#elif (JUNO == HW)
+#define MVE_CORE_BASE 0x6F030000
+#define MVE_CORE_BASE_SIZE 0xFFFF
+#define MVE_IRQ 200
+#elif (VEXP6 == HW)
+#define MVE_CORE_BASE (0xE0000000 + 0x1C020000)
+#define MVE_CORE_BASE_SIZE 0xC000
+#define MVE_IRQ 70
+#else
+#error Unknown platform
+#endif
+
+static struct resource mve_resources[] =
+{
+ [0] = {
+ .start = MVE_CORE_BASE,
+ .end = MVE_CORE_BASE + MVE_CORE_BASE_SIZE,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MVE_IRQ,
+ .end = MVE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static void mve_device_release(struct device *dev)
+{}
+
+/* Mali-V500 can handle 40-bit wide physical addresses */
+static uint64_t mv500_dma_mask = DMA_BIT_MASK(40);
+
+static struct platform_device mve_device =
+{
+ .name = "mali-v500-rsrc",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mve_resources),
+ .resource = mve_resources,
+ .dev = {
+ .platform_data = NULL,
+ .release = mve_device_release,
+ .dma_mask = &mv500_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(40),
+ },
+};
+
+void mver_init_machine(void)
+{
+ platform_device_register(&mve_device);
+}
+
+void mver_deinit_machine(void)
+{
+ platform_device_unregister(&mve_device);
+}
+
+#endif /* #ifndef DEVICETREE */
diff --git a/drivers/video/arm/v5xx/resource/machine/mve_config.c b/drivers/video/arm/v5xx/resource/machine/mve_config.c
new file mode 100644
index 000000000000..24e158e1d184
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/mve_config.c
@@ -0,0 +1,36 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_config.h"
+
+uint32_t *mve_config_get_value(struct mve_config_attribute *attributes,
+ enum mve_config_key key)
+{
+ if (NULL == attributes)
+ {
+ return NULL;
+ }
+
+ while (MVE_CONFIG_DEVICE_ATTR_END != attributes->key)
+ {
+ if (attributes->key == key)
+ {
+ return attributes->value;
+ }
+
+ attributes++;
+ }
+
+ return NULL;
+}
diff --git a/drivers/video/arm/v5xx/resource/machine/mve_config.h b/drivers/video/arm/v5xx/resource/machine/mve_config.h
new file mode 100644
index 000000000000..dbb52c580a66
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/mve_config.h
@@ -0,0 +1,86 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_CONFIG_H
+#define MVE_CONFIG_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+/**
+ * This enum lists the possible keys that may be inserted into a key-value list.
+ */
+enum mve_config_key
+{
+ /**
+ * Power management functions. The value associated with this key must be a
+ * reference to a struct mve_pm_callback_conf.
+ */
+ MVE_CONFIG_DEVICE_ATTR_POWER_CALLBACKS,
+
+ /**
+ * Callback function to retrieve bus attributes. The value associated
+ * with this key must be a function pointer conforming to the prototype
+ * uint32_t (*)(int)
+ */
+ MVE_CONFIG_DEVICE_ATTR_BUS_ATTRIBUTES,
+
+ /**
+ * Callback functions to support dynamic voltage and frequency scaling.
+ * Currently, this is used to enable/disable the clock and change the
+ * clock frequency. The implementation of these functions are assumed
+ * to take care of the voltage settings. The value associated with this
+ * key must be a reference to a struct mve_dvfs_callback_conf.
+ */
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS,
+
+ /**
+ * End-of-vector indicator for the key-value pairs. The configuration loader
+ * stops processing elements when it encounters this key. Note that
+ * the key-value vector must be terminated with this key! The value
+ * associated with this key is ignored.
+ */
+ MVE_CONFIG_DEVICE_ATTR_END
+};
+
+/**
+ * Type definition of the value type used in the key-value pairs structure.
+ */
+typedef uint32_t *mve_config_attribute_value;
+
+/**
+ * Each element in the key-value pairs vector is stored in an instance of this
+ * structure.
+ */
+struct mve_config_attribute
+{
+ enum mve_config_key key; /**< The key of the element */
+ mve_config_attribute_value value; /**< The value corresponding to the key
+ * above */
+};
+
+/**
+ * Returns the value associated with the first occurrence of the supplied key.
+ * The vector must be terminated by a MVE_CONFIG_DEVICE_ATTR_END key.
+ * @param attributes A vector containing key-value pairs.
+ * @param key The key to find the value for.
+ * @return The value if the key exists in the list. NULL if no such key exists.
+ */
+uint32_t *mve_config_get_value(struct mve_config_attribute *attributes,
+ enum mve_config_key key);
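+
+/* Example usage (illustrative; 'attrs' is a hypothetical key-value vector
+ * terminated with MVE_CONFIG_DEVICE_ATTR_END):
+ *
+ *   struct mve_pm_callback_conf *pm = (struct mve_pm_callback_conf *)
+ *       mve_config_get_value(attrs, MVE_CONFIG_DEVICE_ATTR_POWER_CALLBACKS);
+ *   if (NULL != pm)
+ *   {
+ *       pm->power_on_callback();
+ *   }
+ */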
+
+#endif /* MVE_CONFIG_H */
diff --git a/drivers/video/arm/v5xx/resource/machine/mve_dvfs.h b/drivers/video/arm/v5xx/resource/machine/mve_dvfs.h
new file mode 100644
index 000000000000..32c8ce6f151a
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/mve_dvfs.h
@@ -0,0 +1,70 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_DVFS_H
+#define MVE_DVFS_H
+
+/**
+ * An instance of this struct is stored in the key-value pairs vector using the
+ * key MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS.
+ */
+struct mve_dvfs_callback_conf
+{
+ /**
+ * Called by the driver when the clock signal must be provided to the
+ * hardware. This function must not return until the clock is stable.
+ */
+ void (*enable_clock)(void);
+
+ /**
+ * Called by the driver when the clock signal may be shut off to enable
+ * power saving. This is just a notification. The implementation may
+ * chose to ignore this call.
+ */
+ void (*disable_clock)(void);
+
+ /**
+ * Called by the driver when the clock frequency needs to be adjusted.
+ * This may for example happen when the clients attempt to decode
+ * several video clips at the same time and the current clock frequency
+ * is not sufficient to decode frames at the required rate.
+ *
+ * @param clk_rate The minimum required clock frequency
+ */
+ void (*set_rate)(uint32_t clk_rate);
+
+ /**
+ * Called by the driver when it needs to know the current clock frequency.
+ * This happens when the user reads the sysfs file to get the current
+ * clock frequency.
+ *
+ * @return Current clock frequency
+ */
+ uint32_t (*get_rate)(void);
+
+ /**
+ * Called by the driver when it needs to know maximum supported clock frequency.
+ *
+ * @return Maximum supported clock frequency
+ */
+ uint32_t (*get_max_rate)(void);
+
+ /**
+ * Called by the driver when driver is removed with rmmod. The implementation
+ * should clean up all resources including running threads.
+ */
+ void (*stop)(void);
+};
+
+#endif /* MVE_DVFS_H */
diff --git a/drivers/video/arm/v5xx/resource/machine/mve_port_attributes.h b/drivers/video/arm/v5xx/resource/machine/mve_port_attributes.h
new file mode 100644
index 000000000000..f74ea74ebbfd
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/mve_port_attributes.h
@@ -0,0 +1,32 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_PORT_ATTRIBUTES_H
+#define MVE_PORT_ATTRIBUTES_H
+
+/**
+ * This function must return the AXI memory access settings for accessing
+ * pages that are marked with the supplied attribute in the MVE page table.
+ * The driver invokes this function for each attribute when the driver is
+ * initialized and caches the values internally.
+ *
+ * This function pointer is associated with the key
+ * MVE_CONFIG_DEVICE_ATTR_BUS_ATTRIBUTES.
+ *
+ * @param attribute The attribute as stated in the MVE page table
+ * @return AXI memory access settings according to the AXI protocol specification
+ */
+typedef uint32_t (*mve_port_attributes_callback_fptr)(int attribute);
+
+#endif /* MVE_PORT_ATTRIBUTES_H */
diff --git a/drivers/video/arm/v5xx/resource/machine/mve_power_management.h b/drivers/video/arm/v5xx/resource/machine/mve_power_management.h
new file mode 100644
index 000000000000..e700633da08b
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/mve_power_management.h
@@ -0,0 +1,47 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_POWER_MANAGEMENT_H
+#define MVE_POWER_MANAGEMENT_H
+
+/**
+ * An instance of this struct is stored in the key-value pairs vector using the
+ * key MVE_CONFIG_DEVICE_ATTR_POWER_CALLBACKS.
+ */
+struct mve_pm_callback_conf
+{
+ /**
+ * Called by the driver when the VPU is idle and the power to it can be
+ * switched off. The system integrator can decide whether to do nothing,
+ * merely switch off the clocks to the VPU, or power down the VPU
+ * completely.
+ */
+ void (*power_off_callback)(void);
+
+ /**
+ * Called by the driver when the VPU is about to become active and power
+ * must be supplied. This function must not return until the VPU is powered
+ * and clocked sufficiently for register access to succeed. The return
+ * value specifies whether the VPU was powered down since the call to
+ * power_off_callback. If the VPU state has been lost then this function
+ * must return 1, otherwise it should return 0.
+ *
+ * The return value of the first call to this function is ignored.
+ *
+ * @return 1 if the VPU state may have been lost, 0 otherwise.
+ */
+ int (*power_on_callback)(void);
+};
+
+#endif /* MVE_POWER_MANAGEMENT_H */
diff --git a/drivers/video/arm/v5xx/resource/machine/vexpress-regs.h b/drivers/video/arm/v5xx/resource/machine/vexpress-regs.h
new file mode 100644
index 000000000000..8a0eaf4e388d
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/machine/vexpress-regs.h
@@ -0,0 +1,36 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef VEXPRESS_REGS_H
+#define VEXPRESS_REGS_H
+
+#define VEXP6 0
+#define VEXP7 1
+#define JUNO 2
+
+#if (VEXP7 == HW)
+#define MVE_CORE_BASE 0xFC030000
+#define MVE_CORE_BASE_SIZE 0xFFFF
+#define MVE_IRQ 70
+#elif (JUNO == HW)
+#define MVE_CORE_BASE 0x6F030000
+#define MVE_CORE_BASE_SIZE 0xFFFF
+#define MVE_IRQ 200
+#else
+#define MVE_CORE_BASE (0xE0000000 + 0x1C020000)
+#define MVE_CORE_BASE_SIZE 0xC000
+#define MVE_IRQ 70
+#endif
+
+#endif /* VEXPRESS_REGS_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.c b/drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.c
new file mode 100644
index 000000000000..4f10542b996d
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.c
@@ -0,0 +1,125 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/slab.h>
+#endif
+
+#include "mve_rsrc_circular_buffer.h"
+#include "mve_rsrc_mem_frontend.h"
+
+struct mver_circular_buffer *mver_circular_buffer_create(uint32_t size)
+{
+ struct mver_circular_buffer *cb;
+
+ cb = (struct mver_circular_buffer *)
+ MVE_RSRC_MEM_ZALLOC(sizeof(struct mver_circular_buffer), GFP_KERNEL);
+ if (NULL == cb)
+ {
+ return NULL;
+ }
+
+ cb->data = (void **)MVE_RSRC_MEM_VALLOC(sizeof(void *) * size);
+ if (NULL == cb->data)
+ {
+ MVE_RSRC_MEM_FREE(cb);
+ return NULL;
+ }
+
+ cb->size = size;
+
+ return cb;
+}
+
+void mver_circular_buffer_destroy(struct mver_circular_buffer *cb)
+{
+ if (NULL != cb)
+ {
+ MVE_RSRC_MEM_VFREE(cb->data);
+ MVE_RSRC_MEM_FREE(cb);
+ }
+}
+
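+/* Note: r_pos and w_pos increase monotonically and are allowed to wrap;
+ * the unsigned difference (w_pos - r_pos) therefore always equals the
+ * number of occupied slots, as long as size stays far below UINT32_MAX. */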
+bool mver_circular_buffer_add(struct mver_circular_buffer *cb, void *item)
+{
+ bool ret = false;
+
+ if ((uint32_t)(cb->w_pos - cb->r_pos) < (uint32_t)cb->size)
+ {
+ cb->data[cb->w_pos++ % cb->size] = item;
+ ret = true;
+ }
+
+ return ret;
+}
+
+bool mver_circular_buffer_remove(struct mver_circular_buffer *cb, void **data)
+{
+ bool ret = false;
+
+ if ((uint32_t)(cb->w_pos - cb->r_pos) > (uint32_t)0)
+ {
+ *data = cb->data[cb->r_pos++ % cb->size];
+ ret = true;
+ }
+
+ return ret;
+}
+
+uint32_t mver_circular_buffer_get_num_entries(struct mver_circular_buffer *cb)
+{
+ return cb->w_pos - cb->r_pos;
+}
+
+bool mver_circular_buffer_peek(struct mver_circular_buffer *cb, void **data)
+{
+ bool ret = false;
+
+ if ((uint32_t)(cb->w_pos - cb->r_pos) > (uint32_t)0)
+ {
+ *data = cb->data[cb->r_pos % cb->size];
+ ret = true;
+ }
+ else
+ {
+ *data = NULL;
+ }
+
+ return ret;
+}
+
+void mver_circular_buffer_remove_all_occurences(struct mver_circular_buffer *cb, void *data)
+{
+ uint32_t start, end, curr;
+
+ start = cb->r_pos;
+ end = cb->w_pos;
+
+ for (curr = start; (uint32_t)(end - curr) > (uint32_t)0; ++curr)
+ {
+ if (data == cb->data[curr % cb->size])
+ {
+ uint32_t i;
+ for (i = curr; (uint32_t)(i - start) > (uint32_t)0; --i)
+ {
+ cb->data[i % cb->size] = cb->data[(i - 1) % cb->size];
+ }
+ start++;
+ }
+ }
+
+ cb->r_pos = start;
+}
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.h b/drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.h
new file mode 100644
index 000000000000..48b5b9d95e86
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_circular_buffer.h
@@ -0,0 +1,95 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_CIRCULAR_BUFFER
+#define MVE_RSRC_CIRCULAR_BUFFER
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+/**
+ * Structure used to represent a circular buffer.
+ */
+struct mver_circular_buffer
+{
+ uint32_t size; /**< Number of entries (pointer slots) in the circular buffer */
+ uint32_t r_pos; /**< Tail position (read position) */
+ uint32_t w_pos; /**< Head position (write position) */
+
+ void **data; /**< Pointer to the actual circular buffer */
+};
+
+/**
+ * Iterate over all elements in the circular buffer.
+ */
+#define mver_circular_buffer_for_each(index, ptr, type, cb) \
+ for (index = 0; ptr = (type)cb->data[(cb->r_pos + index) % cb->size], index < mver_circular_buffer_get_num_entries(cb); ++index)
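+
+/* Example usage (illustrative; 'struct session' and process() are
+ * hypothetical):
+ *
+ *   uint32_t i;
+ *   struct session *s;
+ *   mver_circular_buffer_for_each(i, s, struct session *, cb)
+ *   {
+ *       process(s);
+ *   }
+ */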
+
+/**
+ * Create a circular buffer of the given size.
+ * @param size Number of elements (pointer slots) in the circular buffer.
+ * @return Pointer to an initialized mver_circular_buffer, NULL on failure
+ */
+struct mver_circular_buffer *mver_circular_buffer_create(uint32_t size);
+
+/**
+ * Destroy a circular buffer. Frees all resources that were allocated during
+ * the creation of the circular buffer. Note that the elements themselves are
+ * not freed.
+ * @param cb The circular buffer to destroy
+ */
+void mver_circular_buffer_destroy(struct mver_circular_buffer *cb);
+
+/**
+ * Add an element to the circular buffer.
+ * @param cb The circular buffer to add the element to
+ * @param item The item to add to the circular buffer
+ * @return True on success, false if the element couldn't be added
+ */
+bool mver_circular_buffer_add(struct mver_circular_buffer *cb, void *item);
+
+/**
+ * Remove the tail element in the circular buffer and return it.
+ * @param cb The circular buffer
+ * @param data This pointer will point to the removed element
+ * @return True if an element was removed. False if the circular buffer was empty
+ */
+bool mver_circular_buffer_remove(struct mver_circular_buffer *cb, void **data);
+
+/**
+ * Get the number of items in the circular buffer.
+ * @param cb The circular buffer
+ * @return The number of items in the circular buffer
+ */
+uint32_t mver_circular_buffer_get_num_entries(struct mver_circular_buffer *cb);
+
+/**
+ * Get the tail element without removing it from the circular buffer.
+ * @param cb The circular buffer
+ * @param data This pointer will point to the tail element
+ * @return True if the circular buffer contains at least one element, false otherwise
+ */
+bool mver_circular_buffer_peek(struct mver_circular_buffer *cb, void **data);
+
+/**
+ * Remove all occurrences of a certain item.
+ * @param cb The circular buffer
+ * @param data The item to be removed
+ */
+void mver_circular_buffer_remove_all_occurences(struct mver_circular_buffer *cb, void *data);
+
+#endif /* MVE_RSRC_CIRCULAR_BUFFER */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_driver.c b/drivers/video/arm/v5xx/resource/mve_rsrc_driver.c
new file mode 100644
index 000000000000..59d1461d6dfa
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_driver.c
@@ -0,0 +1,324 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_irq.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_register.h"
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_pm.h"
+
+#include "machine/mve_port_attributes.h"
+
+static int mve_dev_major;
+struct mve_rsrc_driver_data mve_rsrc_data;
+
+extern struct mve_config_attribute *mve_device_get_config(void);
+
+#if defined(DEVICETREE)
+/* Mali-V500 can handle 40-bit wide physical addresses */
+static uint64_t mv500_dma_mask = DMA_BIT_MASK(40);
+#endif
+
+static int mver_driver_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int mver_driver_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static struct of_device_id mve_rsrc_of_match[] =
+{
+ {
+ .compatible = "arm,mali-v500",
+ },
+ {
+ .compatible = "arm,mali-v550",
+ },
+ {
+ .compatible = "arm,mali-mve",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mve_rsrc_of_match);
+
+static struct file_operations rsrc_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mver_driver_open,
+ .release = mver_driver_release,
+};
+
+static void reset_hw(void)
+{
+ tCS *regs;
+ uint32_t ncore;
+ uint32_t corelsid_mask;
+
+ regs = mver_reg_get_coresched_bank();
+ ncore = mver_reg_read32(&regs->NCORES);
+ corelsid_mask = 0;
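+ /* CORELSID holds a 4-bit LSID field per core; after reset each field
+ * is expected to read back as 0xF (no session mapped), so build an
+ * all-0xF mask covering ncore fields and poll until it matches. */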
+ for (; ncore > 0; --ncore)
+ {
+ corelsid_mask = (corelsid_mask << 4) | 0xF;
+ }
+ mver_reg_write32(&regs->RESET, 1);
+ while (corelsid_mask != mver_reg_read32(&regs->CORELSID))
+ ;
+
+ /* Let the hardware power down */
+ mver_reg_write32(&regs->CLKFORCE, 0);
+
+ mver_reg_put_coresched_bank(&regs);
+}
+
+static int mver_driver_probe(struct platform_device *pdev)
+{
+ struct mve_rsrc_driver_data *private = &mve_rsrc_data;
+ int ret = 0;
+ struct resource *res;
+ struct resource *irq_res;
+ tCS *regs;
+ mve_port_attributes_callback_fptr attr_fptr;
+
+#ifdef DEVICETREE
+ const struct of_device_id *of_match =
+ of_match_device(mve_rsrc_of_match, &pdev->dev);
+
+ if (NULL == of_match)
+ {
+ /* Driver doesn't support this device */
+ printk(KERN_ERR "MVE: No matching device to Mali-MVE of_node: %p.\n", pdev->dev.of_node);
+ return -EINVAL;
+ }
+#endif
+
+ mve_dev_major = register_chrdev(0, MVE_RSRC_DRIVER_NAME, &rsrc_fops);
+ if (0 > mve_dev_major)
+ {
+ printk(KERN_ERR "MVE: Failed to register the driver \'%s\'.\n", MVE_RSRC_DRIVER_NAME);
+ ret = mve_dev_major;
+ goto error;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (NULL == res)
+ {
+ printk(KERN_ERR "MVE: No Mali-MVE I/O registers defined.\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (NULL == irq_res)
+ {
+ printk(KERN_ERR "MVE: No IRQ defined for Mali-MVE.\n");
+ ret = -ENODEV;
+ goto error;
+ }
+ private->irq_nr = irq_res->start;
+ private->irq_flags = irq_res->flags;
+
+ private->mem_res = request_mem_region(res->start, resource_size(res), MVE_RSRC_DRIVER_NAME);
+ if (!private->mem_res)
+ {
+ printk(KERN_ERR "MVE: Failed to request Mali-MVE memory region.\n");
+ ret = -EBUSY;
+ goto error;
+ }
+
+ private->regs = ioremap_nocache(res->start, resource_size(res));
+ if (NULL == private->regs)
+ {
+ printk(KERN_ERR "MVE: Failed to map Mali-MVE registers.\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ private->pdev = pdev;
+ private->irq_enable_count = 0;
+
+#if defined(DEVICETREE)
+ /* There is no way to set the dma_mask using device trees. */
+ pdev->dev.dma_mask = &mv500_dma_mask;
+ pdev->dev.coherent_dma_mask = mv500_dma_mask;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ of_dma_configure(&pdev->dev, pdev->dev.of_node);
+#endif
+#endif
+
+ private->config = mve_device_get_config();
+ if (NULL == private->config)
+ {
+ printk(KERN_ERR "MVE: Failed to request Mali-MVE driver configuration.\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ attr_fptr = (mve_port_attributes_callback_fptr)
+ mve_config_get_value(private->config, MVE_CONFIG_DEVICE_ATTR_BUS_ATTRIBUTES);
+ if (NULL == attr_fptr)
+ {
+ printk(KERN_ERR "MVE: Failed to request MVE_CONFIG_DEVICE_ATTR_BUS_ATTRIBUTES.\n");
+ ret = -ENXIO;
+ goto error;
+ }
+
+ /* Fetch the AXI memory access settings */
+ private->port_attributes[0] = attr_fptr(0);
+ private->port_attributes[1] = attr_fptr(1);
+ private->port_attributes[2] = attr_fptr(2);
+ private->port_attributes[3] = attr_fptr(3);
+
+ private->hw_interaction = true;
+
+ mver_reg_init();
+ mve_rsrc_mem_init(&pdev->dev);
+ mve_rsrc_log_init();
+ mver_irq_handler_init(&pdev->dev);
+ mver_pm_init(&pdev->dev);
+ mver_scheduler_init(&pdev->dev);
+
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ reset_hw();
+ regs = mver_reg_get_coresched_bank();
+ private->nlsid = mver_reg_read32(&regs->NLSID);
+ private->ncore = mver_reg_read32(&regs->NCORES);
+ private->fuse = mver_reg_read32(&regs->FUSE);
+ private->hw_version = mver_reg_read32(&regs->VERSION);
+
+ mver_reg_put_coresched_bank(&regs);
+ pm_runtime_put_sync(&pdev->dev);
+
+ platform_set_drvdata(pdev, private);
+
+ printk("MVE resource driver loaded successfully (nlsid=%u, cores=%u, version=0x%X)\n",
+ private->nlsid, private->ncore, private->hw_version);
+
+ return ret;
+
+error:
+ printk(KERN_ERR "Failed to load the driver \'%s\'.\n", MVE_RSRC_DRIVER_NAME);
+ if (NULL != private->regs)
+ {
+ iounmap(private->regs);
+ }
+
+ if (0 < mve_dev_major)
+ {
+ unregister_chrdev(mve_dev_major, MVE_RSRC_DRIVER_NAME);
+ }
+
+ return ret;
+}
+
+static int mver_driver_remove(struct platform_device *pdev)
+{
+ mver_scheduler_deinit(&pdev->dev);
+ mver_irq_handler_deinit(&pdev->dev);
+ mve_rsrc_log_destroy();
+ mve_rsrc_mem_deinit(&pdev->dev);
+ mver_pm_deinit(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ iounmap(mve_rsrc_data.regs);
+ release_mem_region(mve_rsrc_data.mem_res->start, resource_size(mve_rsrc_data.mem_res));
+ unregister_chrdev(mve_dev_major, MVE_RSRC_DRIVER_NAME);
+
+ printk("MVE resource driver unloaded successfully\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops rsrc_pm =
+{
+ .suspend = mver_pm_suspend,
+ .resume = mver_pm_resume,
+ .runtime_suspend = mver_pm_runtime_suspend,
+ .runtime_resume = mver_pm_runtime_resume,
+ .runtime_idle = mver_pm_runtime_idle,
+};
+
+static struct platform_driver mv500_rsrc_driver =
+{
+ .probe = mver_driver_probe,
+ .remove = mver_driver_remove,
+
+ .driver = {
+ .name = MVE_RSRC_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &rsrc_pm,
+ .of_match_table = mve_rsrc_of_match,
+ },
+};
+
+#ifndef DEVICETREE
+extern void mver_init_machine(void);
+extern void mver_deinit_machine(void);
+#endif
+
+static int __init mver_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mv500_rsrc_driver);
+ if (0 != ret)
+ {
+ return ret;
+ }
+
+#ifndef DEVICETREE
+ /* Driver built without device tree (DT) support */
+ mver_init_machine();
+#endif
+
+ return 0;
+}
+
+static void __exit mver_driver_exit(void)
+{
+ platform_driver_unregister(&mv500_rsrc_driver);
+
+#ifndef DEVICETREE
+ /* Driver built without device tree (DT) support */
+ mver_deinit_machine();
+#endif
+
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_INFO, "MVE resource driver unregistered");
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Mali-V500 resource driver");
+
+module_init(mver_driver_init);
+module_exit(mver_driver_exit);
+
+EXPORT_SYMBOL(mve_rsrc_data);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_driver.h b/drivers/video/arm/v5xx/resource/mve_rsrc_driver.h
new file mode 100644
index 000000000000..df26a925e329
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_driver.h
@@ -0,0 +1,75 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_DRIVER_H
+#define MVE_RSRC_DRIVER_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "machine/mve_config.h"
+#endif
+
+/** @brief Structure for holding resource driver information */
+struct mve_rsrc_driver_data
+{
+ /** Pointer to the platform device */
+ struct platform_device *pdev;
+ /** Assigned irq number */
+ int irq_nr;
+ /** Assigned irq flags */
+ int irq_flags;
+ /** Pointer to the memory-mapped registers */
+ void __iomem *regs;
+ /** Pointer to the memory resource */
+ struct resource *mem_res;
+ /** Number of cores supported by the hardware */
+ int ncore;
+ /** Number of logical sessions supported by the hardware */
+ int nlsid;
+
+ /** Track hardware interaction */
+ bool hw_interaction;
+ /** Reference count for IRQ enable/disable */
+ int irq_enable_count;
+
+ /** Track configuration data */
+ struct mve_config_attribute *config;
+ /** Track port-attributes */
+ uint32_t port_attributes[4];
+
+#ifdef UNIT
+ /** Number of interrupts. */
+ uint32_t interrupts;
+#endif
+
+ /** Hardware fuse(s) state */
+ uint32_t fuse;
+
+ /** Hardware version */
+ uint32_t hw_version;
+};
+
+#ifndef NELEMS
+#define NELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+#define MVE_RSRC_DRIVER_NAME "mali-v500-rsrc"
+
+extern struct mve_rsrc_driver_data mve_rsrc_data;
+
+#endif /* MVE_RSRC_DRIVER_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.c b/drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.c
new file mode 100644
index 000000000000..e4af236d2955
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.c
@@ -0,0 +1,832 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/kthread.h>
+#include <linux/printk.h>
+#include <linux/semaphore.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/export.h>
+#endif
+
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_dvfs.h"
+#include "mve_rsrc_pm.h"
+
+#if defined(DISABLE_DVFS)
+uint32_t max_freq = 0;
+
+void mver_dvfs_init(struct device *dev,
+ const mver_dvfs_get_session_status_fptr get_session_status_callback)
+{
+ max_freq = mver_pm_read_max_frequency();
+}
+
+void mver_dvfs_deinit(struct device *dev)
+{
+ /* Nothing to do */
+}
+
+bool mver_dvfs_register_session(const mver_session_id session_id)
+{
+ /* Request the maximum frequency. It's then up to the integration layer to
+ * decide which frequency is actually used */
+ mver_pm_request_frequency(max_freq);
+
+ return true;
+}
+
+void mver_dvfs_unregister_session(const mver_session_id session_id)
+{
+ /* Nothing to do */
+}
+
+void mver_dvfs_request_max_frequency(void)
+{
+ /* Nothing to do */
+}
+
+#else /* DISABLE_DVFS */
+
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_log.h"
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+#define DVFS_DEBUG_MODE 1
+#else
+#define DVFS_DEBUG_MODE 0
+#endif
+
+/**
+ * Default interval between frequency updates, in milliseconds.
+ * It can be overridden by the user in debug builds when sysfs is enabled.
+ */
+#define POLL_INTERVAL_MS (100)
+
+/* Adjustment steps as a percentage of the maximum supported frequency */
+#define UP_STEP_PERCENT (10)
+#define DOWN_STEP_PERCENT (5)
+
+/**
+ * Structure used by the DVFS module to keep track of session usage and to
+ * make decisions about power management.
+ *
+ * Currently the only parameter taken into consideration is the number of
+ * output buffers enqueued in the FW for each session. DVFS tries to keep
+ * this parameter equal to 1 for all sessions. If a session has more than
+ * one enqueued buffer, the client is waiting for more than one frame and
+ * the clock frequency should be increased. If a session has no buffers
+ * enqueued, the client is not waiting for anything and the clock frequency
+ * can be decreased. When more than one session is registered, increasing
+ * the frequency takes priority over decreasing it.
+ */
+struct session
+{
+ mver_session_id session_id;
+ bool to_remove;
+#if defined(CONFIG_SYSFS)
+ struct mver_dvfs_session_status status;
+#endif
+ struct list_head list;
+};
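+/*
+ * Worked example of the policy above (illustrative only): with three
+ * sessions holding 0, 1 and 3 restricting buffers, buf_min = 0 and
+ * buf_max = 3. Since buf_max > 1, the frequency is raised with
+ * warm_up(3) even though the idle session alone would have allowed a
+ * decrease; increasing takes priority over decreasing.
+ */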
+
+/* A list containing all registered sessions */
+static LIST_HEAD(sessions);
+
+/* Semaphore used to prevent concurrent access to DVFS internal structures */
+static struct semaphore dvfs_sem;
+
+/* DVFS polling task */
+static struct task_struct *dvfs_task = NULL;
+static wait_queue_head_t dvfs_wq;
+
+/**
+ * A callback used to query session status. DVFS uses sessions status to
+ * decide if clock frequency should be updated.
+ */
+static mver_dvfs_get_session_status_fptr get_session_status;
+
+/* Frequency limits */
+static uint32_t max_freq;
+static uint32_t up_step_freq;
+static uint32_t down_step_freq;
+
+/* Flag used to prevent usage of DVFS module when it was not initialized */
+static bool initialized = false;
+
+/* Flag used to indicate that DVFS module is going to shut itself down */
+static bool shutdown = false;
+
+/**
+ * DVFS polling interval - an interval between frequency updates in milliseconds.
+ * It is a constant value for non-debug and non-sysfs builds.
+ */
+#if (1 == DVFS_DEBUG_MODE)
+static int poll_interval_ms = POLL_INTERVAL_MS;
+#else
+static const int poll_interval_ms = POLL_INTERVAL_MS;
+#endif
+
+#if (1 == DVFS_DEBUG_MODE)
+/* Flag used to enable/disable DVFS in debug builds */
+static atomic_t control_enabled = ATOMIC_INIT(1);
+
+/**
+ * Counters used for debugging/verification purposes.
+ */
+
+/* Number of times the clock frequency was changed by DVFS */
+static atomic_long_t changes_cnt = ATOMIC_LONG_INIT(0);
+
+/* Number of times burst mode was used by DVFS */
+static atomic_long_t burst_cnt = ATOMIC_LONG_INIT(0);
+#endif
+
+static bool allocate_session(const mver_session_id session_id);
+static void free_session(struct session *session);
+static struct session *get_session(const mver_session_id session_id);
+
+/**
+ * Warm up VPU.
+ *
+ * This function increases the VPU clock frequency by the requested number
+ * of steps, where possible.
+ *
+ * @param steps Requested number of steps.
+ */
+static void warm_up(const int steps)
+{
+ uint32_t old = mver_pm_read_frequency();
+ uint32_t new;
+#if (1 == DVFS_DEBUG_MODE)
+ bool do_burst = false;
+#endif
+
+ /**
+ * If 3 or more steps are requested, we are far behind the required
+ * performance level, so jump straight to the maximum frequency.
+ */
+ if (steps > 2)
+ {
+ new = max_freq;
+#if (1 == DVFS_DEBUG_MODE)
+ do_burst = true;
+#endif
+ }
+ else
+ {
+ new = min(old + steps * up_step_freq, max_freq);
+ }
+
+ if (old != new)
+ {
+ mver_pm_request_frequency(new);
+#if (1 == DVFS_DEBUG_MODE)
+ atomic_long_inc(&changes_cnt);
+ if (do_burst)
+ {
+ atomic_long_inc(&burst_cnt);
+ }
+#endif
+ }
+}
+
+/**
+ * Cool down VPU.
+ *
+ * This function decreases the VPU clock frequency, where possible.
+ */
+static void cool_down(void)
+{
+ int old_freq = (int)mver_pm_read_frequency();
+ int new_freq;
+
+ /* Use signed arithmetic so the subtraction clamps instead of underflowing. */
+ new_freq = max((int)down_step_freq, old_freq - (int)down_step_freq);
+
+ if (old_freq != new_freq)
+ {
+ mver_pm_request_frequency(new_freq);
+#if (1 == DVFS_DEBUG_MODE)
+ atomic_long_inc(&changes_cnt);
+#endif
+ }
+}
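+/*
+ * Worked example (illustrative numbers): with max_freq = 600 and the
+ * default percentages, up_step_freq = 60 (10%) and down_step_freq = 30
+ * (5%). warm_up(2) at 400 requests min(400 + 2 * 60, 600) = 520, while
+ * warm_up(3) or more jumps straight to max_freq (burst). cool_down() at
+ * 520 requests max(30, 520 - 30) = 490.
+ */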
+
+/**
+ * Update sessions list and VPU clock frequency.
+ *
+ * This function queries the state of all registered sessions and adjusts
+ * VPU clock frequency to meet their needs when dvfs_control is enabled.
+ * When SYSFS is enabled, the function also stores the status of all sessions
+ * so it could be retrieved by the user.
+ *
+ * This function must be called when dvfs_sem semaphore IS NOT locked.
+ */
+static void update_sessions(void)
+{
+ struct list_head *entry;
+ struct list_head *safe;
+ struct session *session;
+ struct mver_dvfs_session_status status;
+ bool status_received;
+ unsigned int buf_max = 0;
+ unsigned int buf_min = UINT_MAX;
+ int sem_failed;
+
+ sem_failed = down_interruptible(&dvfs_sem);
+ if (sem_failed)
+ {
+ return;
+ }
+
+ list_for_each_safe(entry, safe, &sessions)
+ {
+ session = list_entry(entry, struct session, list);
+
+ /**
+ * To avoid a potential deadlock we release dvfs_sem before calling the
+ * get_session_status() callback. After returning from the callback we
+ * have to take dvfs_sem again and verify that the current session was
+ * not unregistered by the scheduler while we were sleeping.
+ */
+ up(&dvfs_sem);
+ status_received = get_session_status(session->session_id, &status);
+ sem_failed = down_interruptible(&dvfs_sem);
+ if (sem_failed)
+ {
+ return;
+ }
+
+ if (shutdown)
+ {
+ up(&dvfs_sem);
+ return;
+ }
+
+ if (session->to_remove)
+ {
+ free_session(session);
+ continue;
+ }
+
+ if (!status_received)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_WARNING,
+ "DVFS failed to retrieve status for the session. Session was removed? session=%p",
+ session->session_id);
+ continue;
+ }
+
+ if (status.restricting_buffer_count > buf_max)
+ {
+ buf_max = status.restricting_buffer_count;
+ }
+ if (status.restricting_buffer_count < buf_min)
+ {
+ buf_min = status.restricting_buffer_count;
+ }
+
+#if defined(CONFIG_SYSFS)
+ session->status = status;
+#endif
+ }
+
+ up(&dvfs_sem);
+
+#if (1 == DVFS_DEBUG_MODE)
+ if (0 == atomic_read(&control_enabled))
+ {
+ return;
+ }
+#endif
+
+ if (buf_max > 1)
+ {
+ warm_up(buf_max);
+ }
+ else if (buf_min < 1)
+ {
+ cool_down();
+ }
+}
+
+/**
+ * Allocate and register a session in DVFS module.
+ *
+ * This function allocates needed resources for the session and registers
+ * it in the module.
+ *
+ * This function must be called when dvfs_sem semaphore IS locked.
+ *
+ * @param session_id Session id
+ * @return True when registration was successful,
+ * False otherwise.
+ */
+static bool allocate_session(const mver_session_id session_id)
+{
+ struct session *session;
+
+ session = MVE_RSRC_MEM_CACHE_ALLOC(sizeof(*session), GFP_KERNEL);
+ if (NULL == session)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_WARNING,
+ "DVFS is unable to allocate memory for a new session. session=%p",
+ session_id);
+ return false;
+ }
+
+ session->session_id = session_id;
+ session->to_remove = false;
+
+ INIT_LIST_HEAD(&session->list);
+ list_add(&session->list, &sessions);
+
+ return true;
+}
+
+/**
+ * Unregister a session from DVFS module.
+ *
+ * When session is not NULL, the function releases all previously allocated
+ * resources for the session and unregisters it from DVFS.
+ *
+ * This function must be called when dvfs_sem semaphore IS locked.
+ *
+ * @param session Session or NULL
+ */
+static void free_session(struct session *session)
+{
+ if (NULL == session)
+ {
+ return;
+ }
+ list_del(&session->list);
+ MVE_RSRC_MEM_CACHE_FREE(session, sizeof(*session));
+}
+
+/**
+ * Find a session with provided session_id.
+ *
+ * This function tries to find previously registered session with provided
+ * session_id.
+ *
+ * This function must be called when dvfs_sem semaphore IS locked.
+ *
+ * @param session_id Session id
+ * @return pointer to session structure when a session was found,
+ * NULL when a session was not found.
+ */
+static struct session *get_session(const mver_session_id session_id)
+{
+ struct list_head *entry;
+ struct session *session;
+ list_for_each(entry, &sessions)
+ {
+ session = list_entry(entry, struct session, list);
+ if (session->session_id == session_id)
+ {
+ return session;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * DVFS polling thread.
+ *
+ * This function is executed in a separate kernel thread. It updates clock
+ * frequency every poll_interval_ms milliseconds.
+ */
+static int dvfs_thread(void *v)
+{
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_INFO, "DVFS polling thread started");
+ while (!kthread_should_stop())
+ {
+ wait_event_interruptible(dvfs_wq, list_empty(&sessions) == 0 || shutdown);
+ update_sessions();
+ msleep_interruptible(poll_interval_ms);
+ }
+
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_INFO, "DVFS polling thread finished");
+ return 0;
+}
+
+/**
+ * Return percent percents from a value val.
+ */
+static uint32_t ratio(const uint32_t val, const uint32_t percent)
+{
+ return (val * percent) / 100;
+}
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+/**
+ * Print DVFS statistics to sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_print_stats(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ struct list_head *entry;
+ struct session *session;
+ uint32_t freq = mver_pm_read_frequency();
+
+ num += scnprintf(buf + num, PAGE_SIZE - num,
+ "freq: %4u, max_freq: %4u, up_step_freq: %3u, down_step_freq: %3u",
+ freq, max_freq, up_step_freq, down_step_freq);
+#if (1 == DVFS_DEBUG_MODE)
+ num += scnprintf(buf + num, PAGE_SIZE - num,
+ ", enabled: %1u, poll_interval_ms: %3u, changes_cnt: %10lu, burst_cnt: %10lu",
+ atomic_read(&control_enabled), poll_interval_ms,
+ atomic_long_read(&changes_cnt), atomic_long_read(&burst_cnt));
+#endif
+ num += scnprintf(buf + num, PAGE_SIZE - num, "\n");
+ list_for_each(entry, &sessions)
+ {
+ session = list_entry(entry, struct session, list);
+ num += scnprintf(buf + num, PAGE_SIZE - num,
+ "%p: out_buf: %02u\n",
+ session->session_id, session->status.restricting_buffer_count);
+ }
+
+ return num;
+}
+
+#if (1 == DVFS_DEBUG_MODE)
+/**
+ * Print DVFS enabling status to sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_print_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ num += scnprintf(buf, PAGE_SIZE, "%u", atomic_read(&control_enabled) ? 1 : 0);
+ return num;
+}
+
+/**
+ * Set DVFS enabling status from sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_set_enabled(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int failed;
+ unsigned int enabled;
+ failed = kstrtouint(buf, 10, &enabled);
+ if (!failed)
+ {
+ atomic_set(&control_enabled, enabled);
+ }
+ return (failed) ? failed : count;
+}
+
+/**
+ * Print current clock frequency to sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_print_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ uint32_t freq = mver_pm_read_frequency();
+ num += scnprintf(buf, PAGE_SIZE, "%u", freq);
+ return num;
+}
+
+/**
+ * Set current clock frequency from sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_set_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int failed;
+ unsigned int freq;
+ failed = kstrtouint(buf, 10, &freq);
+ if (!failed)
+ {
+ mver_pm_request_frequency((uint32_t)freq);
+ }
+ return (failed) ? failed : count;
+}
+
+/**
+ * Set polling interval from sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_set_poll_interval_ms(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int failed;
+ failed = kstrtoint(buf, 10, &poll_interval_ms);
+ return (failed) ? failed : count;
+}
+
+/**
+ * Set up_step value from sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_set_up_step_percent(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int failed;
+ unsigned int up_step_percent;
+ failed = kstrtouint(buf, 10, &up_step_percent);
+ if (!failed)
+ {
+ up_step_freq = ratio(max_freq, up_step_percent);
+ }
+ return (failed) ? failed : count;
+}
+
+/**
+ * Set down_step value from sysfs attribute.
+ *
+ * Used for debugging/verification purposes.
+ */
+static ssize_t sysfs_set_down_step_percent(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int failed;
+ unsigned int down_step_percent;
+ failed = kstrtouint(buf, 10, &down_step_percent);
+ if (!failed)
+ {
+ down_step_freq = ratio(max_freq, down_step_percent);
+ }
+ return (failed) ? failed : count;
+}
+#endif /* DVFS_DEBUG_MODE */
+
+/* Sysfs attributes used to debug/verify DVFS module */
+static struct device_attribute sysfs_files[] =
+{
+ __ATTR(dvfs_stats, S_IRUGO, sysfs_print_stats, NULL),
+#if (1 == DVFS_DEBUG_MODE)
+ __ATTR(dvfs_enabled, (S_IRUGO | S_IWUSR), sysfs_print_enabled, sysfs_set_enabled),
+ __ATTR(dvfs_freq, (S_IRUGO | S_IWUSR), sysfs_print_freq, sysfs_set_freq),
+ __ATTR(dvfs_poll_interval_ms, S_IWUSR, NULL, sysfs_set_poll_interval_ms),
+ __ATTR(dvfs_up_step_percent, S_IWUSR, NULL, sysfs_set_up_step_percent),
+ __ATTR(dvfs_down_step_percent, S_IWUSR, NULL, sysfs_set_down_step_percent),
+#endif
+};
+
+/**
+ * Register all DVFS attributes in sysfs subsystem
+ */
+static void sysfs_register_devices(struct device *dev)
+{
+ int err;
+ int i = NELEMS(sysfs_files);
+
+ while (i--)
+ {
+ err = device_create_file(dev, &sysfs_files[i]);
+ if (err < 0)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "DVFS is unable to create sysfs file. name=%s",
+ sysfs_files[i].attr.name);
+ }
+ }
+}
+
+/**
+ * Remove DVFS attributes from sysfs subsystem
+ */
+static void sysfs_unregister_devices(struct device *dev)
+{
+ int i = NELEMS(sysfs_files);
+
+ while (i--)
+ {
+ device_remove_file(dev, &sysfs_files[i]);
+ }
+}
+#endif /* CONFIG_SYSFS && _DEBUG */
+
+/**
+ * Initialize the DVFS module.
+ *
+ * Must be called before any other function in this module.
+ *
+ * @param dev Device
+ * @param get_session_status_callback Callback to query session status
+ */
+void mver_dvfs_init(struct device *dev,
+ const mver_dvfs_get_session_status_fptr get_session_status_callback)
+{
+ if (!initialized)
+ {
+ sema_init(&dvfs_sem, 1);
+
+ max_freq = mver_pm_read_max_frequency();
+ up_step_freq = ratio(max_freq, UP_STEP_PERCENT);
+ down_step_freq = ratio(max_freq, DOWN_STEP_PERCENT);
+
+ init_waitqueue_head(&dvfs_wq);
+
+ dvfs_task = kthread_run(dvfs_thread, NULL, "dvfs");
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ if (NULL != dev)
+ {
+ sysfs_register_devices(dev);
+ }
+#endif
+ get_session_status = get_session_status_callback;
+ initialized = true;
+ shutdown = false;
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "Attempt to initialize DVFS twice");
+ }
+}
+
+/**
+ * Deinitialize the DVFS module.
+ *
+ * All remaining sessions will be unregistered.
+ *
+ * @param dev Device
+ */
+void mver_dvfs_deinit(struct device *dev)
+{
+ int sem_failed;
+ struct list_head *entry;
+ struct list_head *safe;
+ struct session *session;
+
+ if (!initialized)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "Attempt to deinitialize DVFS when it was not initialized");
+ return;
+ }
+
+ sem_failed = down_interruptible(&dvfs_sem);
+ shutdown = true;
+ if (!sem_failed)
+ {
+ up(&dvfs_sem);
+ }
+
+ wake_up_interruptible(&dvfs_wq);
+ kthread_stop(dvfs_task);
+
+ sem_failed = down_interruptible(&dvfs_sem);
+ list_for_each_safe(entry, safe, &sessions)
+ {
+ session = list_entry(entry, struct session, list);
+ free_session(session);
+ }
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ if (NULL != dev)
+ {
+ sysfs_unregister_devices(dev);
+ }
+#endif
+
+ initialized = false;
+ if (!sem_failed)
+ {
+ up(&dvfs_sem);
+ }
+}
+
+/**
+ * Register a session in the DVFS module.
+ *
+ * @param session_id Session id
+ * @return True when registration was successful,
+ * False otherwise.
+ */
+bool mver_dvfs_register_session(const mver_session_id session_id)
+{
+ bool success = false;
+ int sem_failed;
+
+ if (!initialized)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "DVFS module was not initialized");
+ return false;
+ }
+
+ sem_failed = down_interruptible(&dvfs_sem);
+ if (sem_failed)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "DVFS semaphore was not obtained, sem_failed=%d",
+ sem_failed);
+ return false;
+ }
+
+ if (shutdown)
+ {
+ up(&dvfs_sem);
+ return false;
+ }
+
+ success = allocate_session(session_id);
+ up(&dvfs_sem);
+
+ if (success)
+ {
+ mver_dvfs_request_max_frequency();
+ }
+
+ wake_up_interruptible(&dvfs_wq);
+
+ return success;
+}
+
+/**
+ * Unregister a session from the DVFS module.
+ *
+ * Usage of the corresponding session is not permitted after this call.
+ * @param session_id Session id
+ */
+void mver_dvfs_unregister_session(const mver_session_id session_id)
+{
+ struct session *session;
+ int sem_failed;
+
+ if (!initialized)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "DVFS module was not initialized");
+ return;
+ }
+
+ sem_failed = down_interruptible(&dvfs_sem);
+ if (sem_failed)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR,
+ "DVFS semaphore was not obtained, %d",
+ sem_failed);
+ return;
+ }
+
+ session = get_session(session_id);
+ if (NULL != session)
+ {
+ session->to_remove = true;
+ }
+
+ up(&dvfs_sem);
+}
+
+/**
+ * Request maximum clock frequency. This function is usually called in situations
+ * when the client requests to increase the operating rate.
+ */
+void mver_dvfs_request_max_frequency(void)
+{
+ bool adjust = true;
+
+#if (1 == DVFS_DEBUG_MODE)
+ /* Has DVFS been disabled through the sysfs interface? */
+ adjust = atomic_read(&control_enabled);
+#endif
+
+ if (false != adjust)
+ {
+ mver_pm_request_frequency(max_freq);
+ }
+}
+
+#endif
+
+EXPORT_SYMBOL(mver_dvfs_request_max_frequency);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.h b/drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.h
new file mode 100644
index 000000000000..b123760c6ee8
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_dvfs.h
@@ -0,0 +1,85 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_DVFS_H
+#define MVE_RSRC_DVFS_H
+
+/**
+ * Structure representing session status.
+ * It is used by DVFS to make decisions about power management.
+ */
+struct mver_dvfs_session_status
+{
+ /* Number of restricting buffers enqueued to the FW. Restricting buffers
+ * are what usually limits the current work. For a decoder, this is the
+ * number of output buffers (limited by the display refresh rate). For an
+ * encoder, the restricting buffers are the input buffers (limited by the
+ * rate at which the camera generates images). */
+ int restricting_buffer_count;
+};
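+/*
+ * Example (illustrative): a decoder session with two output buffers
+ * enqueued in the FW reports restricting_buffer_count = 2, prompting DVFS
+ * to raise the clock; a count of 0 allows DVFS to lower it.
+ */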
+
+/**
+ * Function pointer to query session status
+ *
+ * @param session_id Session id
+ * @param status Pointer to a structure where the status should be returned
+ * @return True when query was successful,
+ * False otherwise
+ */
+typedef bool (*mver_dvfs_get_session_status_fptr)(const mver_session_id session_id, struct mver_dvfs_session_status *status);
+
+/**
+ * Initialize the DVFS module.
+ *
+ * Must be called before any other function in this module.
+ *
+ * @param dev Device
+ * @param get_session_status_callback Callback to query session status
+ */
+void mver_dvfs_init(struct device *dev,
+ const mver_dvfs_get_session_status_fptr get_session_status_callback);
+
+/**
+ * Deinitialize the DVFS module.
+ *
+ * All remaining sessions will be unregistered.
+ *
+ * @param dev Device
+ */
+void mver_dvfs_deinit(struct device *dev);
+
+/**
+ * Register a session in the DVFS module.
+ *
+ * @param session_id Session id
+ * @return True when registration was successful,
+ * False otherwise.
+ */
+bool mver_dvfs_register_session(const mver_session_id session_id);
+
+/**
+ * Unregister a session from the DVFS module.
+ *
+ * Usage of the corresponding session is not permitted after this call.
+ * @param session_id Session id
+ */
+void mver_dvfs_unregister_session(const mver_session_id session_id);
+
+/**
+ * Request maximum clock frequency. This function is usually called in situations
+ * when the client requests to increase the operating rate.
+ */
+void mver_dvfs_request_max_frequency(void);
+
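+/*
+ * Typical call sequence (sketch only; my_get_session_status is a
+ * hypothetical callback supplied by the caller):
+ *
+ *   mver_dvfs_init(dev, my_get_session_status);   // once, at probe
+ *   mver_dvfs_register_session(session_id);       // per session
+ *   ...
+ *   mver_dvfs_unregister_session(session_id);
+ *   mver_dvfs_deinit(dev);                        // at remove
+ */
+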
+#endif /* MVE_RSRC_DVFS_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_irq.c b/drivers/video/arm/v5xx/resource/mve_rsrc_irq.c
new file mode 100644
index 000000000000..fd3a797e4e85
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_irq.c
@@ -0,0 +1,230 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#endif
+
+#include "mve_rsrc_irq.h"
+#include "mve_rsrc_register.h"
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_log.h"
+
+#ifndef CSTD_UNUSED
+#define CSTD_UNUSED(x) ((void)(x))
+#endif
+
+#if defined(_DEBUG)
+static uint32_t irq_delay = 0;
+#endif
+
+/** @brief Information required for the bottom half of the interrupt handler */
+struct mver_irq_bottom_half
+{
+ struct work_struct work; /**< Work-structure. */
+ unsigned long irq; /**< IRQ vector. */
+};
+
+static struct mver_irq_bottom_half bottom_half;
+static struct workqueue_struct *mver_work_queue;
+
+/**
+ * This function is executed in process context.
+ */
+void mver_irq_handler_bottom(struct work_struct *work)
+{
+ struct mver_irq_bottom_half *bottom = container_of(work, struct mver_irq_bottom_half, work);
+ uint32_t lsid;
+
+ for (lsid = 0; lsid < mve_rsrc_data.nlsid; lsid++)
+ {
+ int irq_set = test_and_clear_bit(lsid, &bottom->irq);
+
+ if (irq_set != 0)
+ {
+#if defined(_DEBUG) && !defined(EMULATOR)
+ mdelay(irq_delay);
+#endif
+ mver_scheduler_handle_irq(lsid);
+ }
+ }
+}
+
+/**
+ * This function is executed in interrupt context.
+ */
+irqreturn_t mver_irq_handler_top(int irq, void *dev_id)
+{
+ uint32_t irq_vector;
+ tCS *regs;
+ unsigned lsid;
+ irqreturn_t ret = IRQ_NONE;
+
+ CSTD_UNUSED(irq);
+ CSTD_UNUSED(dev_id);
+
+ /* Read the IRQ register to detect which LSIDs triggered the interrupt. */
+ regs = mver_reg_get_coresched_bank_irq();
+ irq_vector = mver_reg_read32(&regs->IRQVE);
+
+ for (lsid = 0; irq_vector != 0; lsid++, irq_vector >>= 1)
+ {
+ if (irq_vector & 0x1)
+ {
+ /* Record that this LSID raised an interrupt and needs processing by the bottom half. */
+ set_bit(lsid, &bottom_half.irq);
+
+ /* LSID generated interrupt. Clear it! */
+ mver_reg_write32(&regs->LSID[lsid].IRQVE, 0);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ queue_work(mver_work_queue, &bottom_half.work);
+
+#ifdef UNIT
+ mve_rsrc_data.interrupts++;
+#endif
+
+ return ret;
+}
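+/*
+ * Example (illustrative): an IRQVE value of 0b0101 means LSID 0 and
+ * LSID 2 raised interrupts. The loop above records both in
+ * bottom_half.irq, clears the per-LSID IRQ registers and returns
+ * IRQ_HANDLED.
+ */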
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+
+static ssize_t sysfs_read_irq_delay(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ uint32_t delay;
+
+ delay = irq_delay;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", delay);
+}
+
+static ssize_t sysfs_write_irq_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ long delay;
+
+ if (0 == kstrtol(buf, 0, &delay))
+ {
+ irq_delay = delay;
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to apply value to irq_delay.");
+ }
+
+ return count;
+}
+
+static struct device_attribute sysfs_files[] =
+{
+ __ATTR(irq_delay, (S_IRUGO | S_IWUSR), sysfs_read_irq_delay, sysfs_write_irq_delay)
+};
+
+#endif /* defined(CONFIG_SYSFS) && defined(_DEBUG) */
+
+void mver_irq_handler_init(struct device *dev)
+{
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ int err;
+#endif
+ bottom_half.irq = 0;
+ INIT_WORK(&bottom_half.work, mver_irq_handler_bottom);
+
+ mver_work_queue = create_workqueue("mv500-wq");
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ err = device_create_file(dev, &sysfs_files[0]);
+ if (0 > err)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to create irq_delay sysfs file.");
+ }
+#endif
+
+#ifdef UNIT
+ mve_rsrc_data.interrupts = 0;
+#endif
+}
+
+void mver_irq_handler_deinit(struct device *dev)
+{
+ flush_workqueue(mver_work_queue);
+ destroy_workqueue(mver_work_queue);
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ device_remove_file(dev, &sysfs_files[0]);
+#endif
+}
+
+void mver_irq_signal_mve(mver_session_id session)
+{
+ if (false != mve_rsrc_data.hw_interaction)
+ {
+ tCS *regs = mver_reg_get_coresched_bank();
+ enum LSID lsid;
+
+ lsid = mver_scheduler_get_session_lsid(session);
+
+ if (lsid != NO_LSID)
+ {
+ mver_reg_write32(&regs->LSID[lsid].IRQHOST, 1);
+ }
+
+ mver_reg_put_coresched_bank(&regs);
+ }
+}
+
+int mver_irq_enable(void)
+{
+ if (-1 != mve_rsrc_data.irq_nr && 0 == mve_rsrc_data.irq_enable_count++)
+ {
+ int irq_trigger = mve_rsrc_data.irq_flags & IRQF_TRIGGER_MASK;
+ int ret = request_irq(mve_rsrc_data.irq_nr, mver_irq_handler_top,
+ IRQF_SHARED | irq_trigger,
+ MVE_RSRC_DRIVER_NAME, &mve_rsrc_data);
+ if (0 != ret)
+ {
+ mve_rsrc_data.irq_enable_count--;
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+void mver_irq_disable(void)
+{
+ if (-1 != mve_rsrc_data.irq_nr && 1 == mve_rsrc_data.irq_enable_count--)
+ {
+ free_irq(mve_rsrc_data.irq_nr, &mve_rsrc_data);
+ }
+}
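+/*
+ * Usage sketch (not part of the driver): enable and disable are reference
+ * counted and must always be balanced. Only the first enable requests the
+ * IRQ and only the matching last disable frees it.
+ *
+ *   if (0 == mver_irq_enable())
+ *   {
+ *       ... interact with the VPU ...
+ *       mver_irq_disable();
+ *   }
+ */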
+
+EXPORT_SYMBOL(mver_irq_signal_mve);
+EXPORT_SYMBOL(mver_irq_enable);
+EXPORT_SYMBOL(mver_irq_disable);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_irq.h b/drivers/video/arm/v5xx/resource/mve_rsrc_irq.h
new file mode 100644
index 000000000000..1b005798618c
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_irq.h
@@ -0,0 +1,73 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_IRQ_H
+#define MVE_IRQ_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/workqueue.h>
+#include <linux/irqreturn.h>
+#endif
+
+#include "mve_rsrc_scheduler.h"
+
+/**
+ * Bottom half interrupt handler. For each LSID, checks whether an interrupt
+ * has occurred. If this is the case, it checks for incoming messages, buffers
+ * and RCP calls.
+ */
+void mver_irq_handler_bottom(struct work_struct *work);
+
+/**
+ * Top half interrupt handler. Receives interrupts, clears them and starts
+ * the bottom half handler.
+ */
+irqreturn_t mver_irq_handler_top(int irq, void *dev_id);
+
+/**
+ * Initialize the interrupt handler. This function creates a work queue that
+ * takes the responsibility as the bottom half interrupt handler.
+ * @param dev Device parameter received from the kernel.
+ */
+void mver_irq_handler_init(struct device *dev);
+
+/**
+ * Tear down the interrupt handler. This function destroys the work queue
+ * that was created for the bottom half interrupt handler.
+ * @param dev Device parameter received from the kernel.
+ */
+void mver_irq_handler_deinit(struct device *dev);
+
+/**
+ * Send an interrupt to the MVE.
+ * @param session_id The logical session ID of the session to interrupt.
+ */
+void mver_irq_signal_mve(mver_session_id session_id);
+
+/**
+ * Enable IRQ reception from the MVE. Note that enable and disable are reference
+ * counted.
+ * @return 0 on success, standard Linux error code on failure (see errno.h).
+ */
+int mver_irq_enable(void);
+
+/**
+ * Disable IRQ reception from the MVE. Note that enable and disable are reference
+ * counted.
+ */
+void mver_irq_disable(void);
+
+#endif /* MVE_IRQ_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_log.c b/drivers/video/arm/v5xx/resource/mve_rsrc_log.c
new file mode 100644
index 000000000000..0061fcda0670
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_log.c
@@ -0,0 +1,1549 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+/******************************************************************************
+ * Includes
+ ******************************************************************************/
+
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_log_ram.h"
+#include "mve_rsrc_mem_frontend.h"
+
+#ifdef __KERNEL__
+#include <asm/uaccess.h>
+#include <linux/aio.h>
+#include <linux/debugfs.h>
+#include <linux/dcache.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/un.h>
+#include <linux/version.h>
+#else
+#include "emulator_userspace.h"
+#endif /* __KERNEL */
+
+/******************************************************************************
+ * Defines
+ ******************************************************************************/
+
+#ifndef UNUSED
+#define UNUSED(x) (void)x
+#endif /* UNUSED */
+
+#ifdef _BullseyeCoverage
+ #define BullseyeCoverageOff \
+ # pragma BullseyeCoverage off
+ #define BullseyeCoverageOn \
+ # pragma BullseyeCoverage on
+#else
+ #define BullseyeCoverageOff
+ #define BullseyeCoverageOn
+#endif
+
+/******************************************************************************
+ * Types
+ ******************************************************************************/
+
+/******************************************************************************
+ * Variables
+ ******************************************************************************/
+
+static struct mve_rsrc_log log;
+
+/* When adding a new log group, please update the functions
+ * suppress_kernel_logs() and restore_kernel_logs()
+ * which are defined in driver/test/system/mve_suite_common.c
+ * accordingly.
+ */
+
+struct mve_rsrc_log_group mve_rsrc_log;
+struct mve_rsrc_log_group mve_rsrc_log_scheduler;
+struct mve_rsrc_log_group mve_rsrc_log_fwif;
+struct mve_rsrc_log_group mve_rsrc_log_session;
+
+EXPORT_SYMBOL(mve_rsrc_log);
+EXPORT_SYMBOL(mve_rsrc_log_fwif);
+EXPORT_SYMBOL(mve_rsrc_log_session);
+
+static struct mve_rsrc_log_drain drain_dmesg;
+static struct mve_rsrc_log_drain_ram drain_ram0;
+#ifndef __KERNEL__
+static struct mve_rsrc_log_drain_file drain_file;
+#endif
+
+#ifdef MVE_LOG_ALOG_ENABLE
+static struct mve_rsrc_log_drain_alog drain_alog;
+#endif /* MVE_LOG_ALOG_ENABLE */
+
+#ifdef MVE_LOG_FTRACE_ENABLE
+static struct mve_rsrc_log_drain drain_ftrace;
+
+/**
+ * Map severity to string.
+ */
+static const char *severity_to_name[] =
+{
+ "Panic",
+ "Error",
+ "Warning",
+ "Info",
+ "Debug",
+ "Verbose"
+};
+#endif /* MVE_LOG_FTRACE_ENABLE */
+
+/**
+ * Map severity to kernel log level.
+ */
+static const char *severity_to_kern_level[] =
+{
+ KERN_EMERG,
+ KERN_ERR,
+ KERN_WARNING,
+ KERN_NOTICE,
+ KERN_INFO,
+ KERN_DEBUG
+};
+
+#ifdef EMULATOR
+static const char *severity_to_a_level[] =
+{
+ "F",
+ "E",
+ "W",
+ "N",
+ "I",
+ "D"
+};
+#endif
+
+/******************************************************************************
+ * Static functions
+ ******************************************************************************/
+
+/******************************************************************************
+ * Log
+ *
+ * Directory i_node->i_private
+ * --------------------------------------------------------
+ * mve struct mve_rsrc_log *
+ * +-- group
+ * | +-- <group> struct mve_rsrc_log_group *
+ * | +-- severity
+ * | +-- drain
+ * +-- drain
+ * +-- <drain> struct mve_rsrc_log_drain *
+ *
+ ******************************************************************************/
+
+BullseyeCoverageOff
+
+/**
+ * Trim of trailing new line.
+ *
+ * @param str Pointer to string.
+ */
+static void trim(char *str)
+{
+ size_t len = strlen(str);
+
+ while (len-- > 0)
+ {
+ if (str[len] != '\n')
+ {
+ break;
+ }
+
+ str[len] = '\0';
+ }
+}
+
+/**
+ * Search for child dentry with matching name.
+ *
+ * @param parent Pointer to parent dentry.
+ * @param name Name of dentry to look for.
+ * @return Pointer to dentry, NULL if not found.
+ */
+static struct dentry *lookup(struct dentry *parent, const char *name)
+{
+ struct dentry *child;
+
+ /* Loop over directory entries in mve/drain/. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) || defined(EMULATOR)
+ list_for_each_entry(child, &parent->d_subdirs, d_child)
+#else
+ list_for_each_entry(child, &parent->d_subdirs, d_u.d_child)
+#endif
+ {
+ if (strcmp(name, child->d_name.name) == 0)
+ {
+ return child;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Read handle function for mve/group/<group>/drain. The function returns the name
+ * of the currently configured drain.
+ */
+static ssize_t readme_read(struct file *file, char __user *user_buffer, size_t count, loff_t *position)
+{
+ static const char msg[] =
+ "LOG GROUPS\n"
+ "\n"
+ "The avaible log groups can be found under 'group'.\n"
+ "$ ls group\n"
+ "\n"
+ "SEVERITY LEVELS\n"
+ " 0 - Panic\n"
+ " 1 - Error\n"
+ " 2 - Warning\n"
+ " 3 - Info\n"
+ " 4 - Debug\n"
+ " 5 - Verbose\n"
+ "\n"
+ "The severity level for a log group can be read and set at runtime.\n"
+ "$ cat group/general/severity\n"
+ "$ echo 3 > group/general/severity\n";
+
+ return simple_read_from_buffer(user_buffer, count, position, msg, sizeof(msg));
+}
+
+/**
+ * Read handle function for mve/group/<group>/drain. The function returns the name
+ * of the currently configured drain.
+ */
+static ssize_t group_drain_read(struct file *file, char __user *user_buffer, size_t count, loff_t *position)
+{
+ /* File path mve/group/<group>/drain. */
+ struct mve_rsrc_log_group *group = file->f_path.dentry->d_parent->d_inode->i_private;
+ struct mve_rsrc_log_drain *drain = group->drain;
+ char name[100];
+ size_t len;
+
+ if (drain == NULL || drain->dentry == NULL)
+ {
+ printk(KERN_ERR "MVE: No drain assigned to log group.\n");
+ return -EINVAL;
+ }
+
+ len = scnprintf(name, sizeof(name), "%s\n", drain->dentry->d_name.name);
+
+ return simple_read_from_buffer(user_buffer, count, position, name, len);
+}
+
+/**
+ * Write handle function for mve/group/<group>/drain. The function sets the drain
+ * for the group. If the drain does not match any registered drain, then error is
+ * returned to user space.
+ */
+static ssize_t group_drain_write(struct file *file, const char __user *user_buffer, size_t count, loff_t *position)
+{
+ /* File path mve/group/<group>/drain. */
+ struct mve_rsrc_log_group *group = file->f_path.dentry->d_parent->d_inode->i_private;
+ struct mve_rsrc_log *log = file->f_path.dentry->d_parent->d_parent->d_parent->d_inode->i_private;
+ struct dentry *dentry;
+ char drain_str[100];
+ ssize_t size;
+
+ /* Check that the input is not larger than the local buffer. */
+ if (count > (sizeof(drain_str) - 1))
+ {
+ printk(KERN_ERR "MVE: Input overflow.\n");
+
+ return -EINVAL;
+ }
+
+ /* Copy the input into a local buffer. */
+ size = simple_write_to_buffer(drain_str, sizeof(drain_str) - 1, position, user_buffer, count);
+ drain_str[count] = '\0';
+ trim(drain_str);
+
+ dentry = lookup(log->drain_dir, drain_str);
+
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: No drain matching '%s'.\n", drain_str);
+ return -EINVAL;
+ }
+
+ /* Assign drain to log group. */
+ group->drain = dentry->d_inode->i_private;
+
+ return size;
+}
+
+/**
+ * Read the RAM buffer.
+ */
+static ssize_t drain_ram_read(struct mve_rsrc_log_drain_ram *drain, char __user *user_buffer, size_t count, loff_t *position, size_t pos)
+{
+ ssize_t n = 0;
+
+ /* Make sure position is not beyond end of file. */
+ if (*position > pos)
+ {
+ return -EINVAL;
+ }
+
+ /* If position is more than buffer_size bytes behind, then fast forward to the current position minus buffer_size. */
+ if ((pos - *position) > drain->buffer_size)
+ {
+ *position = pos - drain->buffer_size;
+ }
+
+ /* Copy data to user space. */
+ while ((n < count) && (*position < pos))
+ {
+ size_t offset;
+ size_t length;
+
+ /* Offset in circular buffer. */
+ offset = *position & (drain->buffer_size - 1);
+
+ /* Available number of bytes. */
+ length = min((size_t)(pos - *position), count - n);
+
+ /* Make sure length does not go beyond end of circular buffer. */
+ length = min(length, drain->buffer_size - offset);
+
+ /* Copy data from kernel- to user space. */
+ length -= copy_to_user(&user_buffer[n], &drain->buf[offset], length);
+
+ /* No bytes were copied. Return error. */
+ if (length == 0)
+ {
+ return -EINVAL;
+ }
+
+ *position += length;
+ n += length;
+ }
+
+ return n;
+}
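+/*
+ * Note: the mask arithmetic above assumes buffer_size is a power of two.
+ * For example, with buffer_size = 0x1000 a file position of 0x1850 maps
+ * to offset 0x1850 & 0xFFF = 0x850 in the circular buffer.
+ */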
+
+/**
+ * Blocking read of the RAM buffer.
+ */
+static ssize_t drain_ram_read_block(struct mve_rsrc_log_drain_ram *drain, char __user *user_buffer, size_t count, loff_t *position, size_t *pos)
+{
+ /* Block until there is data available. */
+ while (*position == *pos)
+ {
+ int ret;
+
+ ret = wait_event_interruptible(drain->queue, *position < *pos);
+ if (ret != 0)
+ {
+ return -EINTR;
+ }
+ }
+
+ return drain_ram_read(drain, user_buffer, count, position, *pos);
+}
+
+/**
+ * Non blocking read of the RAM buffer.
+ */
+static ssize_t drain_ram_read_msg(struct file *file, char __user *user_buffer, size_t count, loff_t *position)
+{
+ struct mve_rsrc_log_drain_ram *drain = file->f_path.dentry->d_parent->d_inode->i_private;
+
+ return drain_ram_read(drain, user_buffer, count, position, drain->write_pos);
+}
+
+/**
+ * Blocking read of the RAM buffer.
+ */
+static ssize_t drain_ram_read_pipe(struct file *file, char __user *user_buffer, size_t count, loff_t *position)
+{
+ struct mve_rsrc_log_drain_ram *drain = file->f_path.dentry->d_parent->d_inode->i_private;
+
+ return drain_ram_read_block(drain, user_buffer, count, position, &drain->write_pos);
+}
+
+/**
+ * Blocking read of the RAM buffer. Write position is only updated when an error is reported.
+ */
+static ssize_t drain_ram_read_flush_on_error(struct file *file, char __user *user_buffer, size_t count, loff_t *position)
+{
+ struct mve_rsrc_log_drain_ram *drain = file->f_path.dentry->d_parent->d_inode->i_private;
+
+ return drain_ram_read_block(drain, user_buffer, count, position, &drain->write_error_pos);
+}
+
+/**
+ * Copy data from iterator.
+ */
+static ssize_t copy_iterator(void *dst, struct iov_iter *iter, size_t size)
+{
+ size_t s = 0;
+
+ while ((s < size) && (iter->nr_segs > 0))
+ {
+ size_t len = min(size - s, iter->iov->iov_len - iter->iov_offset);
+ int ret;
+
+ ret = copy_from_user((char *)dst + s, (char *)iter->iov->iov_base + iter->iov_offset, len);
+ if (ret != 0)
+ {
+ return -EFAULT;
+ }
+
+ s += len;
+ iter->iov_offset += len;
+
+ if (iter->iov_offset >= iter->iov->iov_len)
+ {
+ iter->iov_offset = 0;
+ iter->iov++;
+ iter->nr_segs--;
+ }
+ }
+
+ return s;
+}
+
+/**
+ * Write data to RAM buffer.
+ */
+static ssize_t drain_ram_write_iter(struct kiocb *iocb, struct iov_iter *iov_iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct mve_rsrc_log_drain_ram *drain_ram = file->f_path.dentry->d_parent->d_inode->i_private;
+ struct mve_log_header header;
+ struct iov_iter iter;
+ size_t length;
+ size_t len;
+ size_t pos;
+ size_t ret;
+ int sem_taken;
+
+ /* Calculate total length. */
+ length = iov_length(iov_iter->iov, iov_iter->nr_segs);
+
+ iter = *iov_iter;
+ ret = copy_iterator(&header, &iter, sizeof(header));
+ if (ret != sizeof(header))
+ {
+ printk(KERN_ERR "MVE: Not enough data to read RAM header.\n");
+ return -EFAULT;
+ }
+
+ /* Check that magic is correct. */
+ if (header.magic != MVE_LOG_MAGIC)
+ {
+ printk(KERN_ERR "MVE: RAM header does not contain magic word.\n");
+ return -EFAULT;
+ }
+
+ /* Verify length of header. */
+ if ((header.length + sizeof(header)) != length)
+ {
+ printk(KERN_ERR "MVE: RAM header has incorrect length. header.length+%zu=%zu, length=%zu.\n", sizeof(header), header.length + sizeof(header), length);
+ return -EFAULT;
+ }
+
+ /* Check that message length is not larger than RAM buffer. */
+ if (length > drain_ram->buffer_size)
+ {
+ printk(KERN_ERR "MVE: Logged data larger than output buffer. length=%zu, buffer_length=%zu.\n", length, (size_t)drain_ram->buffer_size);
+ return -EFAULT;
+ }
+
+ sem_taken = down_interruptible(&drain_ram->sem);
+
+ pos = drain_ram->write_pos & (drain_ram->buffer_size - 1);
+ len = length;
+
+ /* Loop over scatter input. */
+ while (len > 0)
+ {
+ size_t n = min(len, drain_ram->buffer_size - pos);
+
+ ret = copy_iterator(&drain_ram->buf[pos], iov_iter, n);
+ if (ret != n)
+ {
+ printk(KERN_ERR "MVE: Failed to copy data from user space.\n");
+ if (0 == sem_taken)
+ {
+ up(&drain_ram->sem);
+ }
+ return -EFAULT;
+ }
+
+ len -= ret;
+ pos = (pos + ret) & (drain_ram->buffer_size - 1);
+ }
+
+ /* Update write_pos to 4 byte aligned length */
+ drain_ram->write_pos += (length + 3) & ~3;
+
+ /* Flush RAM buffer if severity exceeds configured severity. */
+ if (header.severity <= drain_ram->severity)
+ {
+ drain_ram->write_error_pos = drain_ram->write_pos;
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&drain_ram->sem);
+ }
+ wake_up_interruptible(&drain_ram->queue);
+
+ return 0;
+}
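+/*
+ * Note: write_pos advances by the message length rounded up to a 4 byte
+ * boundary, so a 13 byte message advances it by (13 + 3) & ~3 = 16 bytes.
+ * This keeps every RAM log header naturally aligned in the buffer.
+ */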
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+/**
+ * Write data to RAM buffer.
+ */
+static ssize_t drain_ram_aio_write(struct kiocb *iocb, const struct iovec *vec, unsigned long count, loff_t offset)
+{
+ struct iov_iter iter = { 0 };
+
+ /* iter.type = ITER_IOVEC; */
+ iter.iov_offset = 0;
+ iter.count = iov_length(vec, count);
+ iter.iov = vec;
+ iter.nr_segs = count;
+
+ return drain_ram_write_iter(iocb, &iter);
+}
+#endif
+
+/**
+ * Handle IOCTL.
+ */
+static long drain_ram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct mve_rsrc_log_drain_ram *drain_ram = file->f_path.dentry->d_parent->d_inode->i_private;
+
+ switch (cmd)
+ {
+ case MVE_LOG_IOCTL_CLEAR:
+ drain_ram->read_pos = drain_ram->write_pos;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Open file handle function.
+ */
+static int drain_ram_open(struct inode *inode, struct file *file)
+{
+ struct mve_rsrc_log_drain_ram *drain_ram = file->f_path.dentry->d_parent->d_inode->i_private;
+
+ file->f_pos = drain_ram->read_pos;
+
+ return 0;
+}
+
+BullseyeCoverageOn
+
+/**
+ * Add a group with given name to log.
+ *
+ * @param log Pointer to log.
+ * @param name Name of group.
+ * @param group Pointer to group.
+ */
+static void mve_rsrc_log_group_add(struct mve_rsrc_log *log, const char *name, struct mve_rsrc_log_group *group)
+{
+ static const struct file_operations group_drain_fops =
+ {
+ .read = group_drain_read,
+ .write = group_drain_write
+ };
+ struct dentry *dentry;
+
+ /* Create <group> directory. */
+ group->dentry = debugfs_create_dir(name, log->group_dir);
+ if (IS_ERR_OR_NULL(group->dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s' dir.\n", name);
+ return;
+ }
+
+ /* Store reference to group object in inode private data. */
+ group->dentry->d_inode->i_private = group;
+
+ /* Create <group>/severity. */
+ dentry = debugfs_create_u32("severity", 0600, group->dentry, &group->severity);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s/severity' value.\n", name);
+ return;
+ }
+
+ /* Create <group>/drain. */
+ dentry = debugfs_create_file("drain", 0600, group->dentry, NULL, &group_drain_fops);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s/severity' value.\n", name);
+ return;
+ }
+}
+
+/**
+ * Add drain to log.
+ *
+ * @param log Pointer to log.
+ * @param name Name of drain.
+ * @param drain Pointer to drain.
+ */
+static void mve_rsrc_log_drain_add(struct mve_rsrc_log *log, const char *name, struct mve_rsrc_log_drain *drain)
+{
+ /* Create <drain> directory. */
+ drain->dentry = debugfs_create_dir(name, log->drain_dir);
+ if (IS_ERR_OR_NULL(drain->dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s' dir.\n", name);
+ return;
+ }
+
+ /* Store pointer to drain object in inode private data. */
+ drain->dentry->d_inode->i_private = drain;
+}
+
+/**
+ * Derived function to add RAM drain to log.
+ *
+ * @param log Pointer to log.
+ * @param name Name of drain.
+ * @param drain Pointer to drain.
+ */
+static void mve_rsrc_log_drain_ram_add(struct mve_rsrc_log *log, const char *name, struct mve_rsrc_log_drain_ram *drain)
+{
+ static const struct file_operations drain_ram_msg =
+ {
+ .read = drain_ram_read_msg,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+ .aio_write = drain_ram_aio_write,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ .write_iter = drain_ram_write_iter,
+#endif
+ .open = drain_ram_open,
+ .unlocked_ioctl = drain_ram_ioctl
+ };
+ static const struct file_operations drain_ram_pipe =
+ {
+ .read = drain_ram_read_pipe,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+ .aio_write = drain_ram_aio_write,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ .write_iter = drain_ram_write_iter,
+#endif
+ .open = drain_ram_open,
+ .unlocked_ioctl = drain_ram_ioctl
+ };
+ static const struct file_operations drain_ram_flush_on_error =
+ {
+ .read = drain_ram_read_flush_on_error,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+ .aio_write = drain_ram_aio_write,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ .write_iter = drain_ram_write_iter,
+#endif
+ .open = drain_ram_open,
+ .unlocked_ioctl = drain_ram_ioctl
+ };
+ struct dentry *dentry;
+
+ mve_rsrc_log_drain_add(log, name, &drain->base);
+
+ /* Create non blocking dentry. */
+ dentry = debugfs_create_file("msg", 0622, drain->base.dentry, NULL, &drain_ram_msg);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s/msg.\n", name);
+ return;
+ }
+
+ /* Create blocking dentry. */
+ dentry = debugfs_create_file("pipe", 0622, drain->base.dentry, NULL, &drain_ram_pipe);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s/pipe.\n", name);
+ return;
+ }
+
+ /* Create blocking flush-on-error dentry. */
+ dentry = debugfs_create_file("pipe_on_error", 0622, drain->base.dentry, NULL, &drain_ram_flush_on_error);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s/pipe_on_error.\n", name);
+ return;
+ }
+
+ /* Create <drain>/severity. */
+ dentry = debugfs_create_u32("severity", 0600, drain->base.dentry, &drain->severity);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create '%s/severity' value.\n", name);
+ return;
+ }
+}
+
+/******************************************************************************
+ * Log Group
+ ******************************************************************************/
+
+/**
+ * Group constructor.
+ *
+ * @param group Pointer to group.
+ * @param tag Name of group, to be used in log messages.
+ * @param severity Minimum severity to output log message.
+ * @param drain Pointer to drain.
+ */
+static void mve_rsrc_log_group_construct(struct mve_rsrc_log_group *group, const char *tag, const enum mve_rsrc_log_severity severity, struct mve_rsrc_log_drain *drain)
+{
+ group->tag = tag;
+ group->severity = severity;
+ group->drain = drain;
+}
+
+/**
+ * Group destructor.
+ *
+ * @param group Pointer to group.
+ */
+static void mve_rsrc_log_group_destruct(struct mve_rsrc_log_group *group)
+{
+ UNUSED(group);
+}
+
+/******************************************************************************
+ * Log Drain
+ ******************************************************************************/
+
+BullseyeCoverageOff
+
+#ifdef MVE_LOG_FTRACE_ENABLE
+static void mve_rsrc_log_drain_ftrace_print(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *msg, const unsigned n_args, ...)
+{
+ va_list args;
+ char fmt[1000];
+
+ severity = min((int)severity, MVE_LOG_VERBOSE);
+
+ snprintf(fmt, sizeof(fmt), "%s %s: %s\n", severity_to_name[severity], tag, msg);
+ fmt[sizeof(fmt) - 1] = '\0';
+
+ va_start(args, n_args);
+ ftrace_vprintk(fmt, args);
+ va_end(args);
+}
+
+static void mve_rsrc_log_drain_ftrace_data(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ size_t i;
+
+ trace_printk("count=%zu\n", count);
+
+ for (i = 0; i < count; ++i)
+ {
+ const char *p = vec[i].iov_base;
+ size_t length = vec[i].iov_len;
+
+ trace_printk(" length=%zu\n", length);
+
+ while (length > 0)
+ {
+ size_t j = min(length, (size_t)32);
+ char buf[3 + j * 3 + 1];
+ size_t n = 0;
+
+ length -= j;
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, " ");
+
+ while (j-- > 0)
+ {
+ n += scnprintf(&buf[n], sizeof(buf) - n, " %02x", *p++);
+ }
+
+ trace_printk("%s\n", buf);
+ }
+ }
+}
+#endif /* MVE_LOG_FTRACE_ENABLE */
+
+/* The dmesg print functions are designed as fallbacks for systems without
+ * alog functionality. They will therefore most likely never be used and
+ * are excluded from code coverage. */
+
+static void mve_rsrc_log_drain_dmesg_print(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *msg, const unsigned n_args, ...)
+{
+ va_list args;
+ char fmt[1000];
+
+ severity = min((int)severity, MVE_LOG_VERBOSE);
+
+ snprintf(fmt, sizeof(fmt), "%s%s: %s\n", severity_to_kern_level[severity], tag, msg);
+ fmt[sizeof(fmt) - 1] = '\0';
+
+ va_start(args, n_args);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+static void mve_rsrc_log_drain_dmesg_data(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ size_t i;
+
+ printk("count=%zu\n", count);
+
+ for (i = 0; i < count; ++i)
+ {
+ const char *p = vec[i].iov_base;
+ size_t length = vec[i].iov_len;
+
+ printk(" length=%zu\n", length);
+
+ while (length > 0)
+ {
+ size_t j = min(length, (size_t)32);
+ char buf[3 + j * 3 + 1];
+ size_t n = 0;
+
+ length -= j;
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, " ");
+
+ while (j-- > 0)
+ {
+ n += scnprintf(&buf[n], sizeof(buf) - n, " %02x", *p++);
+ }
+
+ printk("%s\n", buf);
+ }
+ }
+}
+BullseyeCoverageOn
+
+#ifdef MVE_LOG_ALOG_ENABLE
+enum android_log_id
+{
+ LOG_ID_MAIN = 0,
+ LOG_ID_RADIO = 1,
+ LOG_ID_EVENTS = 2,
+ LOG_ID_SYSTEM = 3,
+ LOG_ID_CRASH = 4,
+ LOG_ID_KERNEL = 5,
+ LOG_ID_MAX
+};
+
+enum android_log_priority
+{
+ ANDROID_LOG_UNKNOWN = 0,
+ ANDROID_LOG_DEFAULT,
+ ANDROID_LOG_VERBOSE,
+ ANDROID_LOG_DEBUG,
+ ANDROID_LOG_INFO,
+ ANDROID_LOG_WARN,
+ ANDROID_LOG_ERROR,
+ ANDROID_LOG_FATAL,
+ ANDROID_LOG_SILENT
+};
+
+struct android_log_time
+{
+ uint32_t tv_sec;
+ uint32_t tv_nsec;
+}
+__attribute__((__packed__));
+
+struct android_log_header_t
+{
+ uint8_t id;
+ uint16_t tid;
+ struct android_log_time realtime;
+ unsigned char priority;
+}
+__attribute__((__packed__));
+
+#ifdef EMULATOR
+static void mve_rsrc_log_drain_alog_print(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *msg, const unsigned n_args, ...)
+{
+ va_list args;
+ char fmt[1000];
+ char buf[64];
+ struct timespec timespec;
+ struct tm tm;
+
+ getnstimeofday(&timespec);
+
+ localtime_r(&timespec.tv_sec, &tm);
+
+ snprintf(buf, sizeof(buf), "%02d-%02d %02d:%02d:%02d.%03u %5u %5u ", tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, (unsigned)(timespec.tv_nsec / 1000000),
+ (unsigned short)getpid(), (unsigned short)pthread_self());
+
+ severity = min((int)severity, MVE_LOG_VERBOSE);
+
+ snprintf(fmt, sizeof(fmt), "%s%s%s %s: %s\n", severity_to_kern_level[severity], buf, severity_to_a_level[severity], tag, msg);
+ fmt[sizeof(fmt) - 1] = '\0';
+
+ va_start(args, n_args);
+ vprintf(fmt, args);
+ va_end(args);
+}
+
+static void mve_rsrc_log_drain_alog_data(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ size_t i;
+
+ printf("count=%zu\n", count);
+
+ for (i = 0; i < count; ++i)
+ {
+ const char *p = vec[i].iov_base;
+ size_t length = vec[i].iov_len;
+
+ printf(" length=%zu\n", length);
+
+ while (length > 0)
+ {
+ size_t j = min(length, (size_t)32);
+ char buf[3 + j * 3 + 1];
+ size_t n = 0;
+
+ length -= j;
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, " ");
+
+ while (j-- > 0)
+ {
+ n += scnprintf(&buf[n], sizeof(buf) - n, " %02x", *p++);
+ }
+
+ printf("%s\n", buf);
+ }
+ }
+}
+#else
+static int mve_rsrc_log_drain_alog_connect(struct mve_rsrc_log_drain_alog *drain)
+{
+ struct socket *socket;
+ struct sockaddr_un addr;
+ int ret;
+
+ /* Create Unix socket. */
+ ret = sock_create(PF_UNIX, SOCK_DGRAM, 0, &socket);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "MVE: Failed to create socket. error=%d.\n", ret);
+ return ret;
+ }
+
+ /* Connect to socket. */
+ addr.sun_family = AF_UNIX;
+ strcpy(addr.sun_path, "/dev/socket/logdw");
+
+ ret = socket->ops->connect(socket, (struct sockaddr *)&addr, sizeof(addr), 0);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "MVE: Failed to connect to socket. error=%d.\n", ret);
+ sock_release(socket);
+ return ret;
+ }
+
+ drain->socket = socket;
+
+ return 0;
+}
+
+static int mve_rsrc_log_drain_alog_send(struct mve_rsrc_log_drain_alog *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ static const int severity_to_priority[] = { ANDROID_LOG_FATAL, ANDROID_LOG_ERROR, ANDROID_LOG_WARN, ANDROID_LOG_INFO, ANDROID_LOG_DEBUG, ANDROID_LOG_VERBOSE };
+ struct kvec kvec[count + 1];
+ struct android_log_header_t header;
+ struct msghdr msg;
+ struct timespec timespec;
+ int len;
+ int ret;
+ int i;
+
+ getnstimeofday(&timespec);
+
+ /* Make sure severity does not overflow. */
+ severity = min((int)severity, MVE_LOG_VERBOSE);
+
+ /* Fill in header. */
+ header.id = LOG_ID_MAIN;
+ header.tid = 0;
+ header.realtime.tv_sec = timespec.tv_sec;
+ header.realtime.tv_nsec = timespec.tv_nsec;
+ header.priority = severity_to_priority[severity];
+
+ /* Fill in io vector. */
+ kvec[0].iov_base = &header;
+ kvec[0].iov_len = sizeof(header);
+
+ for (i = 0, len = sizeof(header); i < count; ++i)
+ {
+ kvec[i + 1].iov_base = vec[i].iov_base;
+ kvec[i + 1].iov_len = vec[i].iov_len;
+ len += vec[i].iov_len;
+ }
+
+ /* Initialize message. */
+ memset(&msg, 0, sizeof(msg));
+
+ ret = down_interruptible(&drain->sem);
+ if (ret != 0)
+ {
+ return -EINTR;
+ }
+
+ /* Connect to socket if that has not been done before. */
+ if (drain->socket == NULL)
+ {
+ ret = mve_rsrc_log_drain_alog_connect(drain);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "MVE: Failed to connect to Android log daemon socket. error=%d.\n", ret);
+ goto out;
+ }
+ }
+
+ ret = kernel_sendmsg(drain->socket, &msg, kvec, count + 1, len);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "MVE: Failed to send message to logd. error=%d.\n", ret);
+ sock_release(drain->socket);
+ drain->socket = NULL;
+ }
+
+out:
+ up(&drain->sem);
+
+ return ret;
+}
+
+static void mve_rsrc_log_drain_alog_data(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ static const char tag[] = "MVE";
+ struct mve_rsrc_log_drain_alog *alog_drain = (struct mve_rsrc_log_drain_alog *)drain;
+ struct iovec v[2];
+ size_t i;
+ char buf[1000];
+ int n = 0;
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, "count=%zu\n", count);
+
+ for (i = 0; i < count; ++i)
+ {
+ const char *p = vec[i].iov_base;
+ size_t length = vec[i].iov_len;
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, " length=%zu\n", length);
+
+ while (length > 0)
+ {
+ size_t j = min(length, (size_t)32);
+
+ length -= j;
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, " ");
+
+ while (j-- > 0)
+ {
+ n += scnprintf(&buf[n], sizeof(buf) - n, "%02x ", *p++);
+ }
+
+ n += scnprintf(&buf[n], sizeof(buf) - n, "\n");
+ }
+ }
+
+ buf[sizeof(buf) - 1] = '\0';
+
+ v[0].iov_base = (void *)tag;
+ v[0].iov_len = sizeof(tag);
+
+ v[1].iov_base = buf;
+ v[1].iov_len = n + 1;
+
+ mve_rsrc_log_drain_alog_send(alog_drain, severity, v, 2);
+}
+
+static void mve_rsrc_log_drain_alog_print(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *fmt, const unsigned n_args, ...)
+{
+ struct mve_rsrc_log_drain_alog *alog_drain = (struct mve_rsrc_log_drain_alog *)drain;
+ struct iovec vec[2];
+ va_list args;
+ char msg[1000];
+ size_t n;
+
+ va_start(args, n_args);
+ n = vscnprintf(msg, sizeof(msg), fmt, args);
+ va_end(args);
+
+ vec[0].iov_base = (void *)tag;
+ vec[0].iov_len = strlen(tag) + 1;
+
+ vec[1].iov_base = msg;
+ vec[1].iov_len = n + 1;
+
+ mve_rsrc_log_drain_alog_send(alog_drain, severity, vec, 2);
+}
+#endif
+#endif /* MVE_LOG_ALOG_ENABLE */
+
+BullseyeCoverageOff
+
+/* The RAM drain is not used by the emulator, so code coverage is disabled
+ * for the following two RAM drain functions. */
+static void mve_rsrc_log_drain_ram_data(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ struct mve_rsrc_log_drain_ram *drain_ram = (struct mve_rsrc_log_drain_ram *)drain;
+ size_t i;
+ size_t length;
+ size_t pos;
+ int sem_taken;
+
+ /* Calculate the total length of the output. */
+ for (i = 0, length = 0; i < count; ++i)
+ {
+ length += vec[i].iov_len;
+ }
+
+ /* Round up to next 32-bit boundary. */
+ length = (length + 3) & ~3;
+
+ if (length > drain_ram->buffer_size)
+ {
+ printk(KERN_ERR "MVE: Logged data larger than output buffer. length=%zu, buffer_length=%zu.\n", length, (size_t)drain_ram->buffer_size);
+ return;
+ }
+
+ sem_taken = down_interruptible(&drain_ram->sem);
+
+ pos = drain_ram->write_pos & (drain_ram->buffer_size - 1);
+
+ /* Loop over scatter input. */
+ for (i = 0; i < count; ++i)
+ {
+ const char *buf = vec[i].iov_base;
+ size_t len = vec[i].iov_len;
+
+ /* Copy log message to output buffer. */
+ while (len > 0)
+ {
+ size_t n = min(len, drain_ram->buffer_size - pos);
+
+ memcpy(&drain_ram->buf[pos], buf, n);
+
+ len -= n;
+ buf += n;
+ pos = (pos + n) & (drain_ram->buffer_size - 1);
+ }
+ }
+
+ /* Update write_pos. The length has already been aligned to 4 bytes. */
+ drain_ram->write_pos += length;
+
+ /* Advance the error flush position if the message is at least as severe as the configured severity. */
+ if (severity <= drain_ram->severity)
+ {
+ drain_ram->write_error_pos = drain_ram->write_pos;
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&drain_ram->sem);
+ }
+
+ wake_up_interruptible(&drain_ram->queue);
+}
+
+static void mve_rsrc_log_drain_ram_print(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *msg, const unsigned n_args, ...)
+{
+ char buf[1000];
+ va_list args;
+ size_t n = 0;
+ struct mve_log_header header;
+ struct iovec vec[2];
+ struct timespec timespec;
+
+ /* Write the log message. */
+ va_start(args, n_args);
+ n += vscnprintf(buf, sizeof(buf), msg, args);
+ va_end(args);
+
+ getnstimeofday(&timespec);
+
+ header.magic = MVE_LOG_MAGIC;
+ header.length = n;
+ header.type = MVE_LOG_TYPE_TEXT;
+ header.severity = severity;
+ header.timestamp.sec = timespec.tv_sec;
+ header.timestamp.nsec = timespec.tv_nsec;
+
+ vec[0].iov_base = &header;
+ vec[0].iov_len = sizeof(header);
+
+ vec[1].iov_base = buf;
+ vec[1].iov_len = n;
+
+ mve_rsrc_log_drain_ram_data(drain, severity, vec, 2);
+}
+
+BullseyeCoverageOn
+
+/**
+ * Drain constructor.
+ *
+ * @param drain Pointer to drain.
+ * @param print Print function pointer.
+ * @param data Data function pointer.
+ */
+static void mve_rsrc_log_drain_construct(struct mve_rsrc_log_drain *drain, mve_rsrc_print_fptr print, mve_rsrc_data_fptr data)
+{
+ drain->print = print;
+ drain->data = data;
+}
+
+/**
+ * Drain destructor.
+ */
+static void mve_rsrc_log_drain_destruct(struct mve_rsrc_log_drain *drain)
+{
+ UNUSED(drain);
+}
+
+/**
+ * RAM drain constructor.
+ *
+ * @param drain Pointer to drain.
+ * @param print Print function pointer.
+ * @param data Data function pointer.
+ */
+static void mve_rsrc_log_drain_ram_construct(struct mve_rsrc_log_drain_ram *drain, mve_rsrc_print_fptr print, mve_rsrc_data_fptr data, size_t buffer_size, enum mve_rsrc_log_severity severity)
+{
+ mve_rsrc_log_drain_construct(&drain->base, print, data);
+
+ drain->buf = MVE_RSRC_MEM_VALLOC(buffer_size);
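+ /* buffer_size is declared const to guard against accidental resizing; constness is cast away once here during construction. */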
+ *(size_t *) &drain->buffer_size = buffer_size;
+ drain->read_pos = 0;
+ drain->write_pos = 0;
+ drain->write_error_pos = 0;
+ drain->severity = severity;
+ init_waitqueue_head(&drain->queue);
+ sema_init(&drain->sem, 1);
+}
+
+/**
+ * RAM drain destructor.
+ *
+ * @param drain Pointer to drain.
+ */
+static void mve_rsrc_log_drain_ram_destruct(struct mve_rsrc_log_drain_ram *drain)
+{
+ MVE_RSRC_MEM_VFREE(drain->buf);
+
+ mve_rsrc_log_drain_destruct(&drain->base);
+}
+
+#ifndef __KERNEL__
+static void mve_rsrc_log_drain_file_data(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count)
+{
+ struct mve_rsrc_log_drain_file *drain_file = (struct mve_rsrc_log_drain_file *)drain;
+ size_t i;
+ size_t total_length = 0;
+ size_t padding;
+ uint8_t padding_data[4] = { 0 };
+
+ sem_wait(&drain_file->file_sem);
+
+ /* Loop over scatter input. */
+ for (i = 0; i < count; ++i)
+ {
+ /* Write log message to output file */
+ fwrite(vec[i].iov_base, 1, vec[i].iov_len, drain_file->fp);
+ total_length += vec[i].iov_len;
+ }
+
+ /* Calculate the amount of padding that must be added to the end of the entry */
+ padding = ((total_length + 3) & ~3) - total_length;
+ fwrite(padding_data, 1, padding, drain_file->fp);
+
+ /* Flush output to file. */
+ fflush(drain_file->fp);
+
+ sem_post(&drain_file->file_sem);
+}
+
+static void mve_rsrc_log_drain_file_print(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *msg, const unsigned n_args, ...)
+{
+ char buf[1000];
+ va_list args;
+ size_t n = 0;
+ struct mve_log_header header;
+ struct iovec vec[2];
+ struct timespec timespec;
+
+ /* Write the log message. */
+ va_start(args, n_args);
+ n += vscnprintf(buf, sizeof(buf), msg, args);
+ va_end(args);
+
+ getnstimeofday(&timespec);
+
+ header.magic = MVE_LOG_MAGIC;
+ header.length = n;
+ header.type = MVE_LOG_TYPE_TEXT;
+ header.severity = severity;
+ header.timestamp.sec = timespec.tv_sec;
+ header.timestamp.nsec = timespec.tv_nsec;
+
+ vec[0].iov_base = &header;
+ vec[0].iov_len = sizeof(header);
+
+ vec[1].iov_base = buf;
+ vec[1].iov_len = n;
+
+ mve_rsrc_log_drain_file_data(drain, severity, vec, 2);
+}
+
+static void mve_rsrc_log_drain_file_construct(struct mve_rsrc_log_drain_file *drain, mve_rsrc_print_fptr print, mve_rsrc_data_fptr data, char *filename)
+{
+ mve_rsrc_log_drain_construct(&drain->base, print, data);
+
+ sem_init(&drain->file_sem, 0, 1);
+ drain->fp = fopen(filename, "ab");
+ if (NULL == drain->fp)
+ {
+ printk(KERN_ERR "MVE: Failed to open the file %s\n", filename);
+ }
+}
+
+static void mve_rsrc_log_drain_file_destruct(struct mve_rsrc_log_drain_file *drain)
+{
+ sem_wait(&drain->file_sem);
+ fclose(drain->fp);
+ sem_post(&drain->file_sem);
+ sem_destroy(&drain->file_sem);
+ mve_rsrc_log_drain_destruct(&drain->base);
+}
+#endif
+
+#ifdef MVE_LOG_ALOG_ENABLE
+/**
+ * Android log drain constructor.
+ *
+ * @param drain Pointer to drain.
+ * @param print Print function pointer.
+ * @param data Data function pointer.
+ */
+static void mve_rsrc_log_drain_alog_construct(struct mve_rsrc_log_drain_alog *drain, mve_rsrc_print_fptr print, mve_rsrc_data_fptr data)
+{
+ mve_rsrc_log_drain_construct(&drain->base, print, data);
+
+ drain->socket = NULL;
+ sema_init(&drain->sem, 1);
+}
+
+/**
+ * Android log drain destructor.
+ *
+ * @param drain Pointer to drain.
+ */
+static void mve_rsrc_log_drain_alog_destruct(struct mve_rsrc_log_drain_alog *drain)
+{
+ if (drain->socket != NULL)
+ {
+#ifndef EMULATOR
+ sock_release(drain->socket);
+#endif
+ drain->socket = NULL;
+ }
+
+ mve_rsrc_log_drain_destruct(&drain->base);
+}
+#endif /* MVE_LOG_ALOG_ENABLE */
+
+/**
+ * Log constructor.
+ *
+ * @param log Pointer to log.
+ */
+static void mve_rsrc_log_construct(struct mve_rsrc_log *log)
+{
+ static const struct file_operations readme_fops =
+ {
+ .read = readme_read
+ };
+ struct dentry *dentry;
+
+ log->mve_dir = debugfs_create_dir("mve", NULL);
+ if (IS_ERR_OR_NULL(log->mve_dir))
+ {
+ printk(KERN_ERR "MVE: Failed to create 'mve' dir.\n");
+ return;
+ }
+ log->mve_dir->d_inode->i_private = log;
+
+ log->drain_dir = debugfs_create_dir("drain", log->mve_dir);
+ if (IS_ERR_OR_NULL(log->drain_dir))
+ {
+ printk(KERN_ERR "MVE: Failed to create 'drain' dir.\n");
+ goto error;
+ }
+
+ log->group_dir = debugfs_create_dir("group", log->mve_dir);
+ if (IS_ERR_OR_NULL(log->group_dir))
+ {
+ printk(KERN_ERR "MVE: Failed to create 'group' dir.\n");
+ goto error;
+ }
+
+ /* Create the README file. */
+ dentry = debugfs_create_file("README", 0400, log->mve_dir, NULL, &readme_fops);
+ if (IS_ERR_OR_NULL(dentry))
+ {
+ printk(KERN_ERR "MVE: Failed to create 'README'.\n");
+ return;
+ }
+
+ return;
+
+error:
+ debugfs_remove_recursive(log->mve_dir);
+}
+
+/**
+ * Log destructor.
+ *
+ * @param log Pointer to log.
+ */
+static void mve_rsrc_log_destruct(struct mve_rsrc_log *log)
+{
+ debugfs_remove_recursive(log->mve_dir);
+}
+
+/******************************************************************************
+ * External interface
+ ******************************************************************************/
+
+void mve_rsrc_log_init(void)
+{
+ struct mve_rsrc_log_drain *drain_default = &drain_dmesg;
+ struct mve_rsrc_log_drain *drain_ram = &drain_ram0.base;
+
+#ifdef MVE_LOG_ALOG_ENABLE
+ drain_default = &drain_alog.base;
+#endif /* MVE_LOG_ALOG_ENABLE */
+
+#ifdef MVE_LOG_FTRACE_ENABLE
+ drain_default = &drain_ftrace;
+#endif /* MVE_LOG_FTRACE_ENABLE */
+
+#ifndef __KERNEL__
+ drain_ram = &drain_file.base;
+#endif /* __KERNEL__ */
+
+ /* Construct log object. */
+ mve_rsrc_log_construct(&log);
+
+ /* Construct drain objects and add them to log. */
+ mve_rsrc_log_drain_construct(&drain_dmesg, mve_rsrc_log_drain_dmesg_print, mve_rsrc_log_drain_dmesg_data);
+ mve_rsrc_log_drain_add(&log, "dmesg", &drain_dmesg);
+
+ mve_rsrc_log_drain_ram_construct(&drain_ram0, mve_rsrc_log_drain_ram_print, mve_rsrc_log_drain_ram_data, 64 * 1024, MVE_LOG_ERROR);
+ mve_rsrc_log_drain_ram_add(&log, "ram0", &drain_ram0);
+
+#ifndef __KERNEL__
+ mve_rsrc_log_drain_file_construct(&drain_file, mve_rsrc_log_drain_file_print, mve_rsrc_log_drain_file_data, "fw.log");
+#endif /* __KERNEL__ */
+
+#ifdef MVE_LOG_ALOG_ENABLE
+ mve_rsrc_log_drain_alog_construct(&drain_alog, mve_rsrc_log_drain_alog_print, mve_rsrc_log_drain_alog_data);
+ mve_rsrc_log_drain_add(&log, "alog", &drain_alog.base);
+#endif /* MVE_LOG_ALOG_ENABLE */
+
+#ifdef MVE_LOG_FTRACE_ENABLE
+ mve_rsrc_log_drain_construct(&drain_ftrace, mve_rsrc_log_drain_ftrace_print, mve_rsrc_log_drain_ftrace_data);
+ mve_rsrc_log_drain_add(&log, "ftrace", &drain_ftrace);
+#endif /* MVE_LOG_FTRACE_ENABLE */
+
+ /* Construct group objects. */
+ mve_rsrc_log_group_construct(&mve_rsrc_log, "MVE", MVE_LOG_WARNING, drain_default);
+ mve_rsrc_log_group_add(&log, "generic", &mve_rsrc_log);
+
+ mve_rsrc_log_group_construct(&mve_rsrc_log_scheduler, "MVE scheduler", MVE_LOG_WARNING, drain_default);
+ mve_rsrc_log_group_add(&log, "scheduler", &mve_rsrc_log_scheduler);
+
+ mve_rsrc_log_group_construct(&mve_rsrc_log_fwif, "MVE fwif", MVE_LOG_INFO, drain_ram);
+ mve_rsrc_log_group_add(&log, "firmware_interface", &mve_rsrc_log_fwif);
+
+ mve_rsrc_log_group_construct(&mve_rsrc_log_session, "MVE session", MVE_LOG_WARNING, drain_default);
+ mve_rsrc_log_group_add(&log, "session", &mve_rsrc_log_session);
+}
+
+void mve_rsrc_log_destroy(void)
+{
+ /* Destroy objects in reverse order. */
+ mve_rsrc_log_destruct(&log);
+
+ mve_rsrc_log_group_destruct(&mve_rsrc_log);
+ mve_rsrc_log_group_destruct(&mve_rsrc_log_scheduler);
+ mve_rsrc_log_group_destruct(&mve_rsrc_log_fwif);
+ mve_rsrc_log_group_destruct(&mve_rsrc_log_session);
+
+ mve_rsrc_log_drain_destruct(&drain_dmesg);
+ mve_rsrc_log_drain_ram_destruct(&drain_ram0);
+
+#ifndef __KERNEL__
+ mve_rsrc_log_drain_file_destruct(&drain_file);
+#endif /* __KERNEL__ */
+
+#ifdef MVE_LOG_ALOG_ENABLE
+ mve_rsrc_log_drain_alog_destruct(&drain_alog);
+#endif /* MVE_LOG_ALOG_ENABLE */
+
+#ifdef MVE_LOG_FTRACE_ENABLE
+ mve_rsrc_log_drain_destruct(&drain_ftrace);
+#endif /* MVE_LOG_FTRACE_ENABLE */
+}
+
+const char *mve_rsrc_log_strrchr(const char *s)
+{
+ const char *p = strrchr(s, '/');
+
+ return (p == NULL) ? s : p + 1;
+}
+
+struct dentry *mve_rsrc_log_get_parent_dir(void)
+{
+ return log.mve_dir;
+}
+
+EXPORT_SYMBOL(mve_rsrc_log_strrchr);
+EXPORT_SYMBOL(mve_rsrc_log_get_parent_dir);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_log.h b/drivers/video/arm/v5xx/resource/mve_rsrc_log.h
new file mode 100644
index 000000000000..a04b9aa0414e
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_log.h
@@ -0,0 +1,299 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_LOG_H
+#define MVE_RSRC_LOG_H
+
+/******************************************************************************
+ * Includes
+ ******************************************************************************/
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#include <sys/uio.h>
+#else
+#include <linux/net.h>
+#include <linux/semaphore.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/wait.h>
+#endif
+
+/******************************************************************************
+ * Defines
+ ******************************************************************************/
+
+/**
+ * Print a log message.
+ *
+ * @param _lg Pointer to log group.
+ * @param _severity Severity.
+ * @param _fmt Format string.
+ */
+#define MVE_LOG_PRINT(_lg, _severity, _fmt, ...) \
+ do { \
+ if ((_severity) <= (_lg)->severity) \
+ { \
+ __MVE_LOG_PRINT(_lg, _severity, _fmt,##__VA_ARGS__); \
+ } \
+ } while (0)
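+
+/*
+ * Illustrative usage sketch (not a call site added by this patch); the
+ * format string and session variable are hypothetical:
+ *
+ * MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_WARNING,
+ * "firmware boot timed out. session=%p.", session);
+ */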
+
+/**
+ * Print a log message, but only in debug builds.
+ *
+ * @param _lg Pointer to log group.
+ * @param _fmt Format string.
+ */
+#if defined(_DEBUG)
+#define MVE_LOG_DEBUG_PRINT(_lg, _fmt, ...) \
+ MVE_LOG_PRINT(_lg, MVE_LOG_DEBUG, _fmt,##__VA_ARGS__)
+#else
+#define MVE_LOG_DEBUG_PRINT(_lg, _fmt, ...)
+#endif
+
+/**
+ * Print a log message for a session.
+ *
+ * @param _lg Pointer to log group.
+ * @param _severity Severity.
+ * @param _session Pointer to session.
+ * @param _fmt Format string.
+ */
+#define MVE_LOG_PRINT_SESSION(_lg, _severity, _session, _fmt, ...) \
+ do { \
+ if ((_severity) <= (_lg)->severity) \
+ { \
+ __MVE_LOG_PRINT(_lg, _severity, "%p " _fmt, _session, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+/**
+ * Print a log message for a session, but only in debug builds.
+ *
+ * @param _lg Pointer to log group.
+ * @param _session Pointer to session.
+ * @param _fmt Format string.
+ */
+#if defined(_DEBUG)
+#define MVE_LOG_DEBUG_PRINT_SESSION(_lg, _session, _fmt, ...) \
+ MVE_LOG_PRINT_SESSION(_lg, MVE_LOG_DEBUG, _session, _fmt,##__VA_ARGS__)
+#else
+#define MVE_LOG_DEBUG_PRINT_SESSION(_lg, _session, _fmt, ...)
+#endif
+
+/**
+ * Print binary data.
+ *
+ * @param _lg Pointer to log group.
+ * @param _severity Severity.
+ * @param _vec Scatter input vector data.
+ * @param _count Number of elements in the _vec array.
+ */
+#define MVE_LOG_DATA(_lg, _severity, _vec, _count) \
+ do { \
+ if ((_severity) <= (_lg)->severity) \
+ { \
+ (_lg)->drain->data((_lg)->drain, _severity, _vec, _count); \
+ } \
+ } while (0)
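+
+/*
+ * Illustrative sketch: pass a scatter vector of binary data to the group's
+ * drain. The header and payload variables are hypothetical:
+ *
+ * struct iovec vec[2] = {
+ * { .iov_base = &header, .iov_len = sizeof(header) },
+ * { .iov_base = payload, .iov_len = payload_size }
+ * };
+ * MVE_LOG_DATA(&mve_rsrc_log_fwif, MVE_LOG_INFO, vec, 2);
+ */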
+
+/**
+ * Check if severity level for log group is enabled.
+ *
+ * @param _lg Pointer to log group.
+ * @param _severity Severity.
+ */
+#define MVE_LOG_ENABLED(_lg, _severity) \
+ ((_severity) <= (_lg)->severity)
+
+/**
+ * Execute function if log group is enabled.
+ *
+ * @param _lg Pointer to log group.
+ * @param _severity Severity.
+ * @param _exec Statement to execute when the group is enabled at the given severity.
+ */
+#define MVE_LOG_EXECUTE(_lg, _severity, _exec) \
+ do { \
+ if (MVE_LOG_ENABLED(_lg, _severity)) \
+ { \
+ _exec; \
+ } \
+ } while (0)
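+
+/*
+ * Illustrative sketch: run an expensive dump only when the group is enabled
+ * at verbose severity. dump_state() is a hypothetical helper:
+ *
+ * MVE_LOG_EXECUTE(&mve_rsrc_log, MVE_LOG_VERBOSE, dump_state(session));
+ */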
+
+#ifdef MVE_LOG_PRINT_FILE_ENABLE
+#define __MVE_LOG_PRINT(_lg, _severity, _fmt, ...) \
+ (_lg)->drain->print((_lg)->drain, _severity, (_lg)->tag, \
+ _fmt " (%s:%d)", \
+ __MVE_LOG_N_ARGS(__VA_ARGS__), \
+ ##__VA_ARGS__, \
+ mve_rsrc_log_strrchr(__FILE__), __LINE__)
+#else
+#define __MVE_LOG_PRINT(_lg, _severity, _fmt, ...) \
+ (_lg)->drain->print((_lg)->drain, _severity, (_lg)->tag, _fmt, \
+ __MVE_LOG_N_ARGS(__VA_ARGS__), \
+ ##__VA_ARGS__)
+#endif /* MVE_LOG_PRINT_FILE_ENABLE */
+
+#define __MVE_LOG_N_ARGS(...) \
+ __MVE_LOG_COUNT(dummy,##__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __MVE_LOG_COUNT(_0, _1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
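+
+/*
+ * The two macros above count the variadic arguments (up to 8) by shifting a
+ * descending number list. Worked example:
+ *
+ * __MVE_LOG_N_ARGS(a, b)
+ * -> __MVE_LOG_COUNT(dummy, a, b, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+ * -> 2
+ */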
+
+/******************************************************************************
+ * Types
+ ******************************************************************************/
+
+/**
+ * Severity levels.
+ */
+enum mve_rsrc_log_severity
+{
+ MVE_LOG_PANIC,
+ MVE_LOG_ERROR,
+ MVE_LOG_WARNING,
+ MVE_LOG_INFO,
+ MVE_LOG_DEBUG,
+ MVE_LOG_VERBOSE,
+ MVE_LOG_MAX
+};
+
+struct mve_rsrc_log_drain;
+
+/**
+ * Function pointer to output text messages.
+ *
+ * @param drain Pointer to drain.
+ * @param severity Severity level.
+ * @param tag Log group tag.
+ * @param fmt Format string.
+ * @param n_args Number of arguments to format string.
+ */
+typedef void (*mve_rsrc_print_fptr)(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, const char *tag, const char *fmt, const unsigned n_args, ...);
+
+/**
+ * Function pointer to output binary data.
+ *
+ * @param drain Pointer to drain.
+ * @param severity Severity level.
+ * @param vec Scatter input vector.
+ * @param count Number of elements in the vector.
+ */
+typedef void (*mve_rsrc_data_fptr)(struct mve_rsrc_log_drain *drain, enum mve_rsrc_log_severity severity, struct iovec *vec, size_t count);
+
+/**
+ * Structure with information about the drain. The drain handles the formatting
+ * and redirection of the log messages.
+ */
+struct mve_rsrc_log_drain
+{
+ mve_rsrc_print_fptr print; /**< Print function pointer. */
+ mve_rsrc_data_fptr data; /**< Data function pointer. */
+
+ struct dentry *dentry; /**< Debugfs dentry. */
+};
+
+/**
+ * Structure describing a specialized RAM drain.
+ */
+struct mve_rsrc_log_drain_ram
+{
+ struct mve_rsrc_log_drain base; /**< Base class. */
+
+ char *buf; /**< Pointer to output buffer. */
+ const size_t buffer_size; /**< Size of the buffer. Must be power of 2. */
+ size_t read_pos; /**< Read position when a new file handle is opened. Is updated when the buffer is cleared. */
+ size_t write_pos; /**< Current write position in RAM buffer. */
+ size_t write_error_pos; /**< Current write position of last error in RAM buffer. */
+ enum mve_rsrc_log_severity severity; /**< Severity required to flush RAM buffer on error. */
+ wait_queue_head_t queue; /**< Wait queue for blocking IO. */
+ struct semaphore sem; /**< Semaphore to prevent concurrent writes. */
+};
+
+#ifndef __KERNEL__
+/**
+ * Structure describing a file drain.
+ */
+struct mve_rsrc_log_drain_file
+{
+ struct mve_rsrc_log_drain base;
+
+ sem_t file_sem;
+ FILE *fp;
+};
+#endif
+
+/**
+ * Structure describing Android log drain.
+ */
+struct mve_rsrc_log_drain_alog
+{
+ struct mve_rsrc_log_drain base; /**< Base class. */
+ struct socket *socket; /**< Socket to Android log daemon. */
+ struct semaphore sem;
+};
+
+/**
+ * Structure describing a log group. The log group filters which log messages
+ * shall be forwarded to the drain.
+ */
+struct mve_rsrc_log_group
+{
+ const char *tag; /**< Name of log group. */
+ enum mve_rsrc_log_severity severity; /**< Severity level. */
+ struct mve_rsrc_log_drain *drain; /**< Drain. */
+
+ struct dentry *dentry; /**< Debugfs dentry. */
+};
+
+/**
+ * Log class that keeps track of registered groups and drains.
+ */
+struct mve_rsrc_log
+{
+ struct dentry *mve_dir;
+ struct dentry *drain_dir;
+ struct dentry *group_dir;
+};
+
+/******************************************************************************
+ * Prototypes
+ ******************************************************************************/
+
+extern struct mve_rsrc_log_group mve_rsrc_log;
+extern struct mve_rsrc_log_group mve_rsrc_log_scheduler;
+extern struct mve_rsrc_log_group mve_rsrc_log_fwif;
+extern struct mve_rsrc_log_group mve_rsrc_log_session;
+
+/**
+ * Initialize log module. This function must be called before any of the log
+ * groups is used.
+ */
+void mve_rsrc_log_init(void);
+
+/**
+ * Destroy log module.
+ */
+void mve_rsrc_log_destroy(void);
+
+/**
+ * Find last occurrence of '/' in string.
+ *
+ * @param s Pointer to string.
+ * @return Pointer to the character after the last '/', or to the beginning of the string if no '/' is found.
+ */
+const char *mve_rsrc_log_strrchr(const char *s);
+
+/**
+ * Returns the dentry for the parent directory. You can use the return value
+ * of this function if you want to add debug fs files from another module to
+ * the same directory as the log files (e.g. /sys/kernel/debug/mve).
+ */
+struct dentry *mve_rsrc_log_get_parent_dir(void);
+
+#endif /* MVE_RSRC_LOG_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_log_ram.h b/drivers/video/arm/v5xx/resource/mve_rsrc_log_ram.h
new file mode 100644
index 000000000000..c06d2e1a9fa7
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_log_ram.h
@@ -0,0 +1,185 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_LOG_RAM_H
+#define MVE_RSRC_LOG_RAM_H
+
+/******************************************************************************
+ * Includes
+ ******************************************************************************/
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <time.h>
+#include <sys/time.h>
+#else
+#include <linux/types.h>
+#include <linux/time.h>
+#endif
+
+/******************************************************************************
+ * Defines
+ ******************************************************************************/
+
+/**
+ * Magic word "MVEL" that prefixes all messages.
+ *
+ * Messages are stored in native byte order. The magic word can be used to
+ * detect whether the log was stored in the same byte order as that used by
+ * the application unpacking it.
+ */
+#define MVE_LOG_MAGIC 0x4d56454c
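+
+/*
+ * Worked example: a parser that reads the first word of an entry as
+ * 0x4d56454c ("MVEL") shares the producer's byte order; reading
+ * 0x4c45564d ("LEVM") instead means the byte order differs and every
+ * multi-byte field must be byte swapped.
+ */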
+
+/**
+ * The maximum message length.
+ */
+#define MVE_LOG_MESSAGE_LENGTH_MAX 4096
+
+/******************************************************************************
+ * Types
+ ******************************************************************************/
+
+/**
+ * IOCTL commands.
+ */
+enum mve_log_ioctl
+{
+ MVE_LOG_IOCTL_CLEAR /**< Clear the log. */
+};
+
+/**
+ * Message type. The definitions are assigned values that are not allowed to change.
+ */
+enum mve_log_type
+{
+ MVE_LOG_TYPE_TEXT = 0,
+ MVE_LOG_TYPE_FWIF = 1,
+ MVE_LOG_TYPE_FW_BINARY = 2,
+ MVE_LOG_TYPE_MAX
+};
+
+/**
+ * Portable time value format.
+ */
+struct mve_log_timeval
+{
+ uint64_t sec; /**< Seconds since 1970-01-01, Unix time epoch. */
+ uint64_t nsec; /**< Nanoseconds. */
+}
+__attribute__((packed));
+
+/**
+ * Common header for all messages stored in RAM buffer.
+ */
+struct mve_log_header
+{
+ uint32_t magic; /**< Magic word. */
+ uint16_t length; /**< Length of message, excluding this header. */
+ uint8_t type; /**< Message type. */
+ uint8_t severity; /**< Message severity. */
+ struct mve_log_timeval timestamp; /**< Time stamp. */
+}
+__attribute__((packed));
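+
+/*
+ * Illustrative sketch of how an unpacker might walk the buffer; pos and
+ * payload are hypothetical. Entries are stored back to back, each padded to
+ * a 4-byte boundary:
+ *
+ * struct mve_log_header *h = (struct mve_log_header *)pos;
+ * if (h->magic != MVE_LOG_MAGIC)
+ * return -EINVAL;
+ * payload = pos + sizeof(*h);
+ * pos += (sizeof(*h) + h->length + 3) & ~3;
+ */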
+
+/******************************************************************************
+ * Text message
+ ******************************************************************************/
+
+/**
+ * ASCII text message.
+ *
+ * The message shall be header.length bytes long and should end with a
+ * standard ASCII character. The log parser appends a newline and
+ * null-terminates the string.
+ */
+struct mve_log_text
+{
+ char message[0]; /**< ASCII text message. */
+}
+__attribute__((packed));
+
+/******************************************************************************
+ * Firmware interface
+ ******************************************************************************/
+
+/**
+ * Firmware interface message types.
+ */
+enum mve_log_fwif_channel
+{
+ MVE_LOG_FWIF_CHANNEL_MESSAGE,
+ MVE_LOG_FWIF_CHANNEL_INPUT_BUFFER,
+ MVE_LOG_FWIF_CHANNEL_OUTPUT_BUFFER,
+ MVE_LOG_FWIF_CHANNEL_RPC
+};
+
+/**
+ * Firmware interface message types.
+ */
+enum mve_log_fwif_direction
+{
+ MVE_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE,
+ MVE_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST
+};
+
+/**
+ * Special message codes for message types not defined by the firmware interface.
+ */
+enum mve_log_fwif_code
+{
+ MVE_LOG_FWIF_CODE_STAT = 16000
+};
+
+/**
+ * Firmware interface header type.
+ */
+struct mve_log_fwif
+{
+ uint8_t version_minor; /**< Protocol version. */
+ uint8_t version_major; /**< Protocol version. */
+ uint8_t channel; /**< @see enum mve_log_fwif_channel. */
+ uint8_t direction; /**< @see enum mve_log_fwif_direction. */
+ uint64_t session; /**< Session id. */
+ uint8_t data[0]; /**< Data following the firmware interface message header. */
+}
+__attribute__((packed));
+
+/**
+ * Firmware interface statistics.
+ */
+struct mve_log_fwif_stat
+{
+ uint64_t handle; /**< Buffer handle. */
+ uint32_t queued; /**< Number of buffers currently queued to the firmware. */
+}
+__attribute__((packed));
+
+/******************************************************************************
+ * Firmware binary header
+ ******************************************************************************/
+
+/**
+ * Firmware binary header.
+ *
+ * The first ~100 bytes of the firmware binary contain information describing
+ * the codec.
+ */
+struct mve_log_fw_binary
+{
+ uint32_t length; /**< Number of bytes copied from the firmware binary. */
+ uint64_t session; /**< Session id. */
+ uint8_t data[0]; /**< Firmware binary, byte 0..length. */
+};
+
+#endif /* MVE_RSRC_LOG_RAM_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.c b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.c
new file mode 100644
index 000000000000..ce1d688f5225
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.c
@@ -0,0 +1,292 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_rsrc_mem_backend.h"
+
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/export.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_log.h"
+
+#ifdef CONFIG_64BIT
+#define PAGE_MASK_INT 0xFFFFFFFFFFFFF000ULL
+#else
+#define PAGE_MASK_INT 0xFFFFF000
+#endif
+
+phys_addr_t mve_rsrc_mem_alloc_page(void)
+{
+ struct page *new_page;
+ dma_addr_t dma_handle;
+
+ /* Allocate a page that has a kernel logical address (low memory). These
+ * pages always have a virtual address and do not need to be mapped */
+ new_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY | __GFP_COLD);
+ if (NULL == new_page)
+ {
+ goto error;
+ }
+
+ /* dma_map_page ensures that any data held in the cache is discarded or written back */
+ dma_handle = dma_map_page(&mve_rsrc_data.pdev->dev, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&mve_rsrc_data.pdev->dev, dma_handle))
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "mve_rsrc_mem_alloc_page: dma_map_page failed.");
+ goto error;
+ }
+ return (phys_addr_t)dma_handle;
+
+error:
+ if (NULL != new_page)
+ {
+ __free_page(new_page);
+ }
+
+ return 0;
+}
+
+void mve_rsrc_mem_free_page(phys_addr_t addr)
+{
+ struct page *unmap_page;
+ unmap_page = pfn_to_page(addr >> PAGE_SHIFT);
+ dma_unmap_page(&mve_rsrc_data.pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(unmap_page);
+}
+
+void mve_rsrc_mem_free_pages(phys_addr_t *addrs, uint32_t num_pages)
+{
+ int i;
+
+ if (NULL == addrs || 0 == num_pages)
+ {
+ return;
+ }
+
+ for (i = num_pages - 1; i >= 0; i--)
+ {
+ mve_rsrc_mem_free_page(addrs[i]);
+ }
+
+ vfree(addrs);
+}
+
+void *mve_rsrc_mem_cpu_map_page(phys_addr_t addr)
+{
+ phys_addr_t page_off = addr & ~PAGE_MASK_INT;
+ struct page *p = pfn_to_page(addr >> PAGE_SHIFT);
+ void *page_ptr = kmap(p);
+
+ return (((uint8_t *)page_ptr) + page_off);
+}
+
+void mve_rsrc_mem_cpu_unmap_page(phys_addr_t addr)
+{
+ struct page *p = pfn_to_page(addr >> PAGE_SHIFT);
+ kunmap(p);
+}
+
+phys_addr_t *mve_rsrc_mem_alloc_pages(uint32_t nr_pages)
+{
+ int i;
+ phys_addr_t *pages;
+
+ if (0 == nr_pages)
+ {
+ return NULL;
+ }
+
+ pages = vmalloc(sizeof(phys_addr_t) * nr_pages);
+ if (NULL == pages)
+ {
+ return NULL;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ {
+ pages[i] = mve_rsrc_mem_alloc_page();
+
+ if (0 == pages[i])
+ {
+ /* Page allocation failed. Free all allocated pages and return NULL */
+ mve_rsrc_mem_free_pages(pages, i);
+ return NULL;
+ }
+ }
+
+ return pages;
+}
+
+phys_addr_t *mve_rsrc_mem_map_virt_to_phys(void *ptr, uint32_t size, uint32_t write)
+{
+ struct page **phys_pages = NULL;
+ phys_addr_t *pages = NULL;
+ int i;
+ int get_npages = 0;
+ uint32_t nr_pages;
+ uintptr_t virt_addr;
+
+ if (NULL == ptr || 0 == size)
+ {
+ return NULL;
+ }
+
+ nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ pages = vmalloc(sizeof(phys_addr_t) * nr_pages);
+ if (NULL == pages)
+ {
+ goto error;
+ }
+
+ virt_addr = (uintptr_t)ptr;
+ if (0 != virt_addr - (virt_addr & PAGE_MASK_INT))
+ {
+ goto error;
+ }
+
+ phys_pages = vmalloc(nr_pages * sizeof(struct page *));
+ if (NULL == phys_pages)
+ {
+ goto error;
+ }
+
+ down_read(&current->mm->mmap_sem);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ get_npages = get_user_pages(virt_addr, nr_pages, write != 0 ? FOLL_WRITE : FOLL_GET, phys_pages, NULL);
+#else
+ get_npages = get_user_pages(current, current->mm, virt_addr, nr_pages, write, 0, phys_pages, NULL);
+#endif
+ up_read(&current->mm->mmap_sem);
+
+ if (get_npages != nr_pages)
+ {
+ goto error;
+ }
+
+ for (i = 0; i < nr_pages; ++i)
+ {
+ /* Flush the cache and retrieve the physical address */
+ dma_addr_t dma_handle;
+ dma_handle = dma_map_page(&mve_rsrc_data.pdev->dev, phys_pages[i], 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&mve_rsrc_data.pdev->dev, dma_handle))
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "mve_rsrc_mem_map_virt_to_phys: dma_map_page failed.");
+ dma_handle = (dma_addr_t)NULL;
+ }
+ pages[i] = (phys_addr_t)dma_handle;
+ }
+
+ vfree(phys_pages);
+ return pages;
+
+error:
+ for (i = get_npages - 1; i >= 0; --i)
+ {
+ put_page(phys_pages[i]);
+ }
+ if (NULL != phys_pages)
+ {
+ vfree(phys_pages);
+ }
+ if (NULL != pages)
+ {
+ vfree(pages);
+ }
+
+ return NULL;
+}
+
+void mve_rsrc_mem_unmap_virt_to_phys(phys_addr_t *pages, uint32_t nr_pages)
+{
+ int i;
+
+ if (NULL == pages || 0 == nr_pages)
+ {
+ return;
+ }
+
+ for (i = nr_pages - 1; i >= 0; --i)
+ {
+ struct page *p = pfn_to_page(pages[i] >> PAGE_SHIFT);
+
+ dma_unmap_page(&mve_rsrc_data.pdev->dev, pages[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ set_page_dirty_lock(p);
+ put_page(p);
+ }
+
+ vfree(pages);
+}
+
+uint32_t mve_rsrc_mem_read32(phys_addr_t addr)
+{
+ phys_addr_t offset = addr & ~PAGE_MASK_INT;
+ struct page *p = pfn_to_page(addr >> PAGE_SHIFT);
+ void *ptr = kmap(p);
+ uint32_t ret;
+
+ ret = *((uint32_t *)(((uint8_t *)ptr) + offset));
+ kunmap(p);
+
+ return ret;
+}
+
+void mve_rsrc_mem_write32(phys_addr_t addr, uint32_t value)
+{
+ phys_addr_t offset = addr & ~PAGE_MASK_INT;
+ struct page *p = pfn_to_page(addr >> PAGE_SHIFT);
+ void *ptr = kmap(p);
+
+ *((uint32_t *)(((uint8_t *)ptr) + offset)) = value;
+ kunmap(p);
+}
+
+void mve_rsrc_mem_clean_cache_range(phys_addr_t addr, uint32_t size)
+{
+ dma_sync_single_for_device(&mve_rsrc_data.pdev->dev, addr, size, DMA_TO_DEVICE);
+}
+
+void mve_rsrc_mem_invalidate_cache_range(phys_addr_t addr, uint32_t size)
+{
+ dma_sync_single_for_device(&mve_rsrc_data.pdev->dev, addr, size, DMA_FROM_DEVICE);
+}
+
+void mve_rsrc_mem_flush_write_buffer(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ dsb(sy);
+#else
+ dsb();
+#endif
+}
+
+EXPORT_SYMBOL(mve_rsrc_mem_alloc_page);
+EXPORT_SYMBOL(mve_rsrc_mem_free_page);
+EXPORT_SYMBOL(mve_rsrc_mem_free_pages);
+EXPORT_SYMBOL(mve_rsrc_mem_cpu_map_page);
+EXPORT_SYMBOL(mve_rsrc_mem_cpu_unmap_page);
+EXPORT_SYMBOL(mve_rsrc_mem_alloc_pages);
+EXPORT_SYMBOL(mve_rsrc_mem_map_virt_to_phys);
+EXPORT_SYMBOL(mve_rsrc_mem_unmap_virt_to_phys);
+EXPORT_SYMBOL(mve_rsrc_mem_read32);
+EXPORT_SYMBOL(mve_rsrc_mem_write32);
+EXPORT_SYMBOL(mve_rsrc_mem_clean_cache_range);
+EXPORT_SYMBOL(mve_rsrc_mem_invalidate_cache_range);
+EXPORT_SYMBOL(mve_rsrc_mem_flush_write_buffer);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.h b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.h
new file mode 100644
index 000000000000..c679aff82cd4
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_backend.h
@@ -0,0 +1,134 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_MEM_BACKEND_H
+#define MVE_RSRC_MEM_BACKEND_H
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+/**
+ * Allocate one page of physical memory.
+ * @return Physical address of the allocated page, or 0 on failure.
+ */
+phys_addr_t mve_rsrc_mem_alloc_page(void);
+
+/**
+ * Free a physical page.
+ * @param addr Address of the page that is to be freed.
+ */
+void mve_rsrc_mem_free_page(phys_addr_t addr);
+
+/**
+ * Convenience function for freeing an array of pages. The supplied array is
+ * also freed with a call to vfree. Internally calls mve_rsrc_mem_free_page on
+ * each page.
+ * @param addrs Array of pages to free.
+ * @param num_pages Number of pages to free.
+ */
+void mve_rsrc_mem_free_pages(phys_addr_t *addrs, uint32_t num_pages);
+
+/**
+ * Map a page into the virtual address space of the calling process. This function
+ * returns a virtual address that can be used to access the contents stored
+ * at the supplied physical address. Note that this function handles non page
+ * aligned addresses. The client must free the mapping using
+ * mve_rsrc_mem_cpu_unmap_page when the mapping is no longer needed.
+ * @param addr Physical address to map.
+ * @return CPU virtual address.
+ */
+void *mve_rsrc_mem_cpu_map_page(phys_addr_t addr);
+
+/**
+ * Unmap a page previously mapped with mve_rsrc_mem_cpu_map_page.
+ * @param addr Physical address of the page to unmap.
+ */
+void mve_rsrc_mem_cpu_unmap_page(phys_addr_t addr);
+
+/**
+ * Pins and returns an array of physical pages corresponding to a region in
+ * virtual CPU address space. The client must unpin the pages and free the
+ * returned array by calling mve_rsrc_mem_unmap_virt_to_phys when the pages are
+ * no longer needed.
+ * @param ptr Virtual address of the memory region.
+ * @param size Size of the memory region.
+ * @param write Nonzero to map the pages writable, zero for read-only.
+ * @return Array of physical addresses of the pages. Returns NULL if the virtual
+ * address is NULL, not page aligned or size is 0. The client must free
+ * the returned array to prevent memory leaks.
+ */
+phys_addr_t *mve_rsrc_mem_map_virt_to_phys(void *ptr, uint32_t size, uint32_t write);
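+
+/*
+ * Illustrative pairing sketch; ptr and size are hypothetical:
+ *
+ * phys_addr_t *pages = mve_rsrc_mem_map_virt_to_phys(ptr, size, 1);
+ * if (NULL != pages)
+ * {
+ * ... let the hardware access the pages ...
+ * mve_rsrc_mem_unmap_virt_to_phys(pages, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ * }
+ */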
+
+/**
+ * Unpins the pages and releases the array containing physical addresses of the
+ * pages backing a memory region pointed out by a virtual CPU address.
+ * @param pages Pages corresponding to a region defined by a virtual CPU address.
+ * @param nr_pages Number of pages
+ */
+void mve_rsrc_mem_unmap_virt_to_phys(phys_addr_t *pages, uint32_t nr_pages);
+
+/**
+ * Allocates the specified number of pages and returns an array of physical
+ * addresses. Note that the client must release the pages and free the
+ * returned array, e.g. via mve_rsrc_mem_free_pages, to prevent memory leaks.
+ * This is a convenience function that calls mve_rsrc_mem_alloc_page internally.
+ * @param nr_pages Number of pages to allocate.
+ * @return An array containing physical addresses of the allocated pages. Returns
+ * NULL on failure or if nr_pages equals 0.
+ */
+phys_addr_t *mve_rsrc_mem_alloc_pages(uint32_t nr_pages);
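+
+/*
+ * Illustrative pairing sketch; nr_pages is hypothetical:
+ *
+ * phys_addr_t *pages = mve_rsrc_mem_alloc_pages(nr_pages);
+ * if (NULL != pages)
+ * {
+ * ...
+ * mve_rsrc_mem_free_pages(pages, nr_pages);
+ * }
+ */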
+
+/**
+ * Read 32-bits of data from the specified physical address.
+ * @param addr Physical address of the memory location to read.
+ * @return 32-bits of data read.
+ */
+uint32_t mve_rsrc_mem_read32(phys_addr_t addr);
+
+/**
+ * Write 32-bits of data to the specified physical address.
+ * @param addr Physical address of the memory location to write to.
+ * @param value The value to write to memory.
+ */
+void mve_rsrc_mem_write32(phys_addr_t addr, uint32_t value);
+
+/**
+ * Clean the memory range specified by addr and size from the CPU cache. The cache
+ * lines will be written to memory if they are marked as dirty. The lines will then
+ * be marked as not dirty. The lines will not be evicted from the cache which means
+ * that a subsequent memory access will hit the cache.
+ * @param addr Physical address of the first byte to clean from the cache.
+ * @param size Size of the block to clean.
+ */
+void mve_rsrc_mem_clean_cache_range(phys_addr_t addr, uint32_t size);
+
+/**
+ * Invalidate the memory range specified by addr and size in the CPU cache. The
+ * cached lines will be marked as non-valid even if they are marked as dirty. No
+ * lines will be written to memory. A cache flush can be implemented as a clean
+ * followed by an invalidate.
+ * @param addr Physical address of the first byte to invalidate in the cache.
+ * @param size Size of the block to invalidate.
+ */
+void mve_rsrc_mem_invalidate_cache_range(phys_addr_t addr, uint32_t size);
+
+/**
+ * Flush the write buffer.
+ */
+void mve_rsrc_mem_flush_write_buffer(void);
+
+#endif /* MVE_RSRC_MEM_BACKEND_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.c b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.c
new file mode 100644
index 000000000000..c8a497da2c68
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.c
@@ -0,0 +1,132 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#endif
+
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_mem_cache.h"
+#include "mve_rsrc_log.h"
+
+#define MAX_MEMORY_CACHE_SMALL 32
+#define MAX_MEMORY_CACHE_MEDIUM 128
+#define MAX_MEMORY_CACHE_LARGE 1024
+
+static struct kmem_cache *mve_small_cachep;
+static struct kmem_cache *mve_medium_cachep;
+static struct kmem_cache *mve_large_cachep;
+
+void mve_rsrc_mem_cache_init(void)
+{
+ mve_small_cachep = kmem_cache_create("mve_small_cache",
+ MAX_MEMORY_CACHE_SMALL,
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ mve_medium_cachep = kmem_cache_create("mve_medium_cache",
+ MAX_MEMORY_CACHE_MEDIUM,
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ mve_large_cachep = kmem_cache_create("mve_large_cache",
+ MAX_MEMORY_CACHE_LARGE,
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+}
+
+void mve_rsrc_mem_cache_deinit(void)
+{
+ if (NULL != mve_small_cachep)
+ {
+ kmem_cache_destroy(mve_small_cachep);
+ }
+ if (NULL != mve_medium_cachep)
+ {
+ kmem_cache_destroy(mve_medium_cachep);
+ }
+ if (NULL != mve_large_cachep)
+ {
+ kmem_cache_destroy(mve_large_cachep);
+ }
+}
+
+static struct kmem_cache *mve_rsrc_get_cachep(uint32_t size)
+{
+ struct kmem_cache *cachep = NULL;
+
+ if (size <= MAX_MEMORY_CACHE_SMALL)
+ {
+ cachep = mve_small_cachep;
+ }
+ else if (size <= MAX_MEMORY_CACHE_MEDIUM)
+ {
+ cachep = mve_medium_cachep;
+ }
+ else if (size <= MAX_MEMORY_CACHE_LARGE)
+ {
+ cachep = mve_large_cachep;
+ }
+ else
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "No allocation slab found. size=%u.", size);
+ }
+
+ return cachep;
+}
+
+void *mve_rsrc_mem_cache_alloc(uint32_t size, uint32_t flags)
+{
+ void *ptr;
+ struct kmem_cache *cachep;
+
+ cachep = mve_rsrc_get_cachep(size);
+
+ if (NULL == cachep)
+ {
+ /* Use vzalloc so the fallback path returns zeroed memory, matching the slab path below. */
+ ptr = vzalloc(size);
+ }
+ else
+ {
+ ptr = kmem_cache_alloc(cachep, flags);
+ if (ptr)
+ {
+ memset(ptr, 0, size);
+ }
+ }
+ return ptr;
+}
+
+void mve_rsrc_mem_cache_free(void *ptr, uint32_t size)
+{
+ struct kmem_cache *cachep;
+
+ cachep = mve_rsrc_get_cachep(size);
+
+ if (NULL == cachep)
+ {
+ vfree(ptr);
+ }
+ else
+ {
+ kmem_cache_free(cachep, ptr);
+ }
+}
+EXPORT_SYMBOL(mve_rsrc_mem_cache_alloc);
+EXPORT_SYMBOL(mve_rsrc_mem_cache_free);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.h b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.h
new file mode 100644
index 000000000000..28c649451266
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_cache.h
@@ -0,0 +1,47 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_MEM_CACHE_H
+#define MVE_RSRC_MEM_CACHE_H
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+/**
+ * Cache memory initialization.
+ */
+void mve_rsrc_mem_cache_init(void);
+
+/**
+ * Cache memory de-initialization.
+ */
+void mve_rsrc_mem_cache_deinit(void);
+
+/**
+ * Allocate memory, either from one of the slab caches or, for sizes larger
+ * than the largest cache, via vmalloc.
+ * @param size The requested allocation size.
+ * @param flags Allocation flags.
+ * @return Pointer to the allocation, or NULL on failure.
+ */
+void *mve_rsrc_mem_cache_alloc(uint32_t size, uint32_t flags);
+
+/**
+ * Free memory previously allocated with mve_rsrc_mem_cache_alloc. The same
+ * size must be passed as was used at allocation, so that the matching
+ * backing allocator is selected.
+ * @param ptr The memory pointer to be freed.
+ * @param size The allocation size that was requested.
+ */
+void mve_rsrc_mem_cache_free(void *ptr, uint32_t size);
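+
+/*
+ * Illustrative sketch: the same size must be passed to alloc and free so
+ * that the same slab is selected. MY_SIZE is hypothetical:
+ *
+ * void *p = mve_rsrc_mem_cache_alloc(MY_SIZE, GFP_KERNEL);
+ * if (NULL != p)
+ * {
+ * ...
+ * mve_rsrc_mem_cache_free(p, MY_SIZE);
+ * }
+ */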
+
+#endif /* MVE_RSRC_MEM_CACHE_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.c b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.c
new file mode 100644
index 000000000000..2d5561f2fc98
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.c
@@ -0,0 +1,130 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#endif
+
+#include "mve_rsrc_mem_dma.h"
+#include "mve_rsrc_mem_dma_uncached.h"
+
+/**
+ * Function pointers to a DMA memory implementation
+ */
+struct dma_mem_fptr
+{
+ struct mve_rsrc_dma_mem_t *(*alloc)(uint32_t size);
+ void (*free)(struct mve_rsrc_dma_mem_t *mem);
+ void (*clean_cache)(struct mve_rsrc_dma_mem_t *mem);
+ void (*invalidate_cache)(struct mve_rsrc_dma_mem_t *mem);
+ void *(*map)(struct mve_rsrc_dma_mem_t *mem);
+ void (*unmap)(struct mve_rsrc_dma_mem_t *mem);
+ phys_addr_t *(*get_pages)(struct mve_rsrc_dma_mem_t *mem);
+};
+
+static struct dma_mem_fptr fptrs[] =
+{
+ {
+ mve_rsrc_dma_mem_alloc_uncached,
+ mve_rsrc_dma_mem_free_uncached,
+ mve_rsrc_dma_mem_clean_cache_uncached,
+ mve_rsrc_dma_mem_invalidate_cache_uncached,
+ mve_rsrc_dma_mem_map_uncached,
+ mve_rsrc_dma_mem_unmap_unchached,
+ mve_rsrc_dma_mem_get_pages_uncached,
+ }
+};
+
+struct mve_rsrc_dma_mem_t *mve_rsrc_dma_mem_alloc(uint32_t size, enum mve_rsrc_dma_mem_type type)
+{
+ if (0 == size)
+ {
+ return NULL;
+ }
+
+ if (type >= DMA_MEM_TYPE_MAX)
+ {
+ return NULL;
+ }
+
+ return fptrs[type].alloc(size);
+}
+
+void mve_rsrc_dma_mem_free(struct mve_rsrc_dma_mem_t *mem)
+{
+ if (NULL != mem)
+ {
+ fptrs[mem->type].free(mem);
+ }
+}
+
+void mve_rsrc_dma_mem_clean_cache(struct mve_rsrc_dma_mem_t *mem)
+{
+ if (NULL != mem)
+ {
+ fptrs[mem->type].clean_cache(mem);
+ }
+}
+
+void mve_rsrc_dma_mem_invalidate_cache(struct mve_rsrc_dma_mem_t *mem)
+{
+ if (NULL != mem)
+ {
+ fptrs[mem->type].invalidate_cache(mem);
+ }
+}
+
+void *mve_rsrc_dma_mem_map(struct mve_rsrc_dma_mem_t *mem)
+{
+ void *ret = NULL;
+
+ if (NULL != mem)
+ {
+ ret = fptrs[mem->type].map(mem);
+ }
+
+ return ret;
+}
+
+void mve_rsrc_dma_mem_unmap(struct mve_rsrc_dma_mem_t *mem)
+{
+ if (NULL != mem)
+ {
+ fptrs[mem->type].unmap(mem);
+ }
+}
+
+phys_addr_t *mve_rsrc_dma_mem_get_pages(struct mve_rsrc_dma_mem_t *mem)
+{
+ phys_addr_t *ret = NULL;
+
+ if (NULL != mem)
+ {
+ ret = fptrs[mem->type].get_pages(mem);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(mve_rsrc_dma_mem_alloc);
+EXPORT_SYMBOL(mve_rsrc_dma_mem_free);
+EXPORT_SYMBOL(mve_rsrc_dma_mem_clean_cache);
+EXPORT_SYMBOL(mve_rsrc_dma_mem_invalidate_cache);
+EXPORT_SYMBOL(mve_rsrc_dma_mem_map);
+EXPORT_SYMBOL(mve_rsrc_dma_mem_unmap);
+EXPORT_SYMBOL(mve_rsrc_dma_mem_get_pages);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.h b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.h
new file mode 100644
index 000000000000..b78b38a70d57
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma.h
@@ -0,0 +1,92 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_MEM_DMA_H
+#define MVE_RSRC_MEM_DMA_H
+
+/**
+ * DMA memory type.
+ */
+enum mve_rsrc_dma_mem_type
+{
+ DMA_MEM_TYPE_UNCACHED, /**< Uncached DMA memory */
+ DMA_MEM_TYPE_MAX
+};
+
+/**
+ * DMA memory handle.
+ */
+struct mve_rsrc_dma_mem_t
+{
+ enum mve_rsrc_dma_mem_type type; /**< Memory type (cached/uncached) */
+ uint32_t size; /**< Size of the memory region in bytes */
+};
+
+/**
+ * Allocate DMA memory. The memory can be either cached or uncached.
+ * @param size Number of bytes to allocate
+ * @param type The kind of DMA memory (cached/uncached)
+ * @return DMA memory handle, or NULL on failure
+ */
+struct mve_rsrc_dma_mem_t *mve_rsrc_dma_mem_alloc(uint32_t size, enum mve_rsrc_dma_mem_type type);
+
+/**
+ * Free DMA memory.
+ * @param mem Handle representing the memory region to free
+ */
+void mve_rsrc_dma_mem_free(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Clean the CPU cache. All dirty cache lines holding data for the DMA memory
+ * region have been written back to memory when this function returns.
+ * @param mem Handle to the DMA memory to evict from the CPU cache
+ */
+void mve_rsrc_dma_mem_clean_cache(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Invalidate the CPU cache. All cache lines holding information about the
+ * DMA memory region have been invalidated when this function returns. The
+ * next CPU read will therefore be served from memory.
+ * @param mem Handle to the DMA memory to invalidate from the CPU cache
+ */
+void mve_rsrc_dma_mem_invalidate_cache(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * CPU map DMA memory. Returns a virtual address to the DMA memory that the CPU
+ * can use to access the memory. Note that the client must unmap the memory
+ * when access is no longer needed.
+ * @param mem Handle to the DMA memory to CPU map
+ * @return Virtual address to the DMA memory
+ */
+void *mve_rsrc_dma_mem_map(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * CPU unmap DMA memory. The virtual address returned by mve_rsrc_dma_mem_map
+ * is not valid after invoking this function. It's safe to unmap memory that is not
+ * currently mapped.
+ * @param mem Handle to the DMA memory to unmap
+ */
+void mve_rsrc_dma_mem_unmap(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Returns an array containing the physical addresses of the pages that make up
+ * the DMA memory region identified by the supplied handle.
+ * @param mem Handle to the DMA memory
+ * @return An array of physical addresses or NULL on failure. Do not free
+ * the returned array since this is handled by the DMA memory submodule.
+ */
+phys_addr_t *mve_rsrc_dma_mem_get_pages(struct mve_rsrc_dma_mem_t *mem);
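+
+/*
+ * Illustrative lifecycle sketch for this API; size is hypothetical, error
+ * handling is omitted, and the pages array is owned by the submodule and
+ * must not be freed by the caller:
+ *
+ * struct mve_rsrc_dma_mem_t *mem = mve_rsrc_dma_mem_alloc(size, DMA_MEM_TYPE_UNCACHED);
+ * void *cpu = mve_rsrc_dma_mem_map(mem);
+ * phys_addr_t *pages = mve_rsrc_dma_mem_get_pages(mem);
+ * ...
+ * mve_rsrc_dma_mem_unmap(mem);
+ * mve_rsrc_dma_mem_free(mem);
+ */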
+
+#endif /* MVE_RSRC_MEM_DMA_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.c b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.c
new file mode 100644
index 000000000000..4feb5c83d351
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.c
@@ -0,0 +1,122 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/dma-mapping.h>
+#endif
+
+#include "mve_rsrc_mem_dma_uncached.h"
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_driver.h"
+
+/**
+ * Private structure to the uncached DMA memory allocator
+ */
+struct dma_mem_uncached_t
+{
+ struct mve_rsrc_dma_mem_t header; /**< DON'T MOVE! This member must be first in the struct */
+
+ void *cpu_addr; /**< Virtual CPU address to the first byte of the allocation */
+ dma_addr_t dma_handle; /**< DMA address (same as the physical address of the first
+ * page of the allocation) */
+ phys_addr_t *pages; /**< Array containing the allocated physical pages */
+};
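+
+/*
+ * Because the header is placed first, the generic handle and the private
+ * allocator structure share the same address, so a plain cast converts
+ * between the two views (effectively container_of with offset zero):
+ *
+ * struct dma_mem_uncached_t *priv = (struct dma_mem_uncached_t *)mem;
+ * struct mve_rsrc_dma_mem_t *hdr = &priv->header;
+ */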
+
+struct mve_rsrc_dma_mem_t *mve_rsrc_dma_mem_alloc_uncached(uint32_t size)
+{
+ struct dma_mem_uncached_t *mem;
+
+ mem = MVE_RSRC_MEM_ZALLOC(sizeof(struct dma_mem_uncached_t), GFP_KERNEL);
+ if (NULL != mem)
+ {
+ mem->header.type = DMA_MEM_TYPE_UNCACHED;
+ mem->header.size = size;
+
+ mem->cpu_addr = dma_alloc_coherent(&mve_rsrc_data.pdev->dev, size, &mem->dma_handle, GFP_KERNEL);
+ if (NULL == mem->cpu_addr)
+ {
+ MVE_RSRC_MEM_FREE(mem);
+ mem = NULL;
+ }
+ else
+ {
+ /* dma_alloc_coherent is not guaranteed to return zeroed memory on arm64, so clear it explicitly */
+ memset(mem->cpu_addr, 0, size);
+ }
+ }
+
+ return (struct mve_rsrc_dma_mem_t *)mem;
+}
+
+void mve_rsrc_dma_mem_free_uncached(struct mve_rsrc_dma_mem_t *mem)
+{
+ struct dma_mem_uncached_t *ptr = (struct dma_mem_uncached_t *)mem;
+
+ dma_free_coherent(&mve_rsrc_data.pdev->dev, ptr->header.size, ptr->cpu_addr, ptr->dma_handle);
+ if (NULL != ptr->pages)
+ {
+ MVE_RSRC_MEM_VFREE(ptr->pages);
+ }
+ MVE_RSRC_MEM_FREE(mem);
+}
+
+void mve_rsrc_dma_mem_clean_cache_uncached(struct mve_rsrc_dma_mem_t *mem)
+{
+ /* Uncached memory needs no cache maintenance; just flush the CPU write buffer */
+ mve_rsrc_mem_flush_write_buffer();
+}
+
+void mve_rsrc_dma_mem_invalidate_cache_uncached(struct mve_rsrc_dma_mem_t *mem)
+{
+ /* Uncached memory has no CPU cache lines to invalidate; just flush the write buffer */
+ mve_rsrc_mem_flush_write_buffer();
+}
+
+void *mve_rsrc_dma_mem_map_uncached(struct mve_rsrc_dma_mem_t *mem)
+{
+ struct dma_mem_uncached_t *ptr = (struct dma_mem_uncached_t *)mem;
+
+ return ptr->cpu_addr;
+}
+
+void mve_rsrc_dma_mem_unmap_uncached(struct mve_rsrc_dma_mem_t *mem)
+{
+ /* Nop */
+}
+
+phys_addr_t *mve_rsrc_dma_mem_get_pages_uncached(struct mve_rsrc_dma_mem_t *mem)
+{
+ struct dma_mem_uncached_t *ptr = (struct dma_mem_uncached_t *)mem;
+
+ if (NULL == ptr->pages)
+ {
+ /* Construct a list containing the pages of the allocation */
+ uint32_t i;
+ uint32_t num_pages = (ptr->header.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ptr->pages = MVE_RSRC_MEM_VALLOC(sizeof(phys_addr_t) * num_pages);
+ if (NULL == ptr->pages)
+ {
+ return NULL;
+ }
+
+ for (i = 0; i < num_pages; ++i)
+ {
+ ptr->pages[i] = ptr->dma_handle + i * PAGE_SIZE;
+ }
+ }
+
+ return ptr->pages;
+}
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.h b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.h
new file mode 100644
index 000000000000..7d320d76ed26
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_dma_uncached.h
@@ -0,0 +1,66 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_MEM_DMA_UNCACHED_H
+#define MVE_RSRC_MEM_DMA_UNCACHED_H
+
+#include "mve_rsrc_mem_dma.h"
+
+/**
+ * Allocate uncached memory.
+ * @param size Number of bytes to allocate
+ * @return Handle to the allocated memory
+ */
+struct mve_rsrc_dma_mem_t *mve_rsrc_dma_mem_alloc_uncached(uint32_t size);
+
+/**
+ * Free uncached memory.
+ * @param mem Handle to the memory to free
+ */
+void mve_rsrc_dma_mem_free_uncached(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Clean the CPU cache for the memory region pointed out by the supplied handle.
+ * @param mem Handle to the memory
+ */
+void mve_rsrc_dma_mem_clean_cache_uncached(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Invalidate the CPU cache for the memory region pointed out by the supplied handle.
+ * @param mem Handle to the memory
+ */
+void mve_rsrc_dma_mem_invalidate_cache_uncached(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Map an uncached memory region into the CPU address space.
+ * @param mem Handle to the memory to map
+ * @return Virtual CPU address
+ */
+void *mve_rsrc_dma_mem_map_uncached(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Unmap an uncached memory region from the CPU address space.
+ * @param mem Handle to the memory
+ */
+void mve_rsrc_dma_mem_unmap_uncached(struct mve_rsrc_dma_mem_t *mem);
+
+/**
+ * Get the address of the physical pages comprising the memory region pointed out by the supplied
+ * handle.
+ * @param mem Handle to the memory
+ * @return Array of physical addresses. Do not free this array!
+ */
+phys_addr_t *mve_rsrc_dma_mem_get_pages_uncached(struct mve_rsrc_dma_mem_t *mem);
+
+#endif /* MVE_RSRC_MEM_DMA_UNCACHED_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.c b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.c
new file mode 100644
index 000000000000..1a577866bcb7
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.c
@@ -0,0 +1,670 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_mem_backend.h"
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_log.h"
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else /* EMULATOR */
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/hashtable.h>
+#endif /* EMULATOR */
+
+#if (1 == MVE_MEM_DBG_SUPPORT)
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+static bool mem_resfail_did_fail = false;
+static uint32_t mem_resfail_threshold = 0;
+static uint32_t mem_resfail_curr_alloc_nr = 0;
+static bool mem_resfail_enabled = false;
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+#ifdef EMULATOR
+struct semaphore watchdog_sem;
+#endif /* EMULATOR */
+
+/* Hashtable containing all non freed memory allocations */
+static DEFINE_HASHTABLE(memory_entries, 8);
+static uint32_t allocation_nr;
+
+static uint32_t allocated_memory = 0;
+static uint32_t peak_allocated_memory = 0;
+
+/* Lock to protect the memory allocations list */
+static spinlock_t entries_lock;
+
+enum memory_type {VIRTUAL, PHYSICAL};
+
+struct memory_entry
+{
+ void *os_ptr; /**< Address of the allocated memory */
+ uint32_t size; /**< Size of the allocation */
+
+ enum memory_type mem_type; /**< Type of allocation, either virtual
+ * or physical memory */
+ unsigned int sequence; /**< Allocation sequence */
+
+ char const *allocated_from_func; /**< String describing in which function
+ * the allocation was made */
+ unsigned int allocated_from_line; /**< At which line was the allocation made */
+
+ struct hlist_node hash; /**< Memory entry hash table */
+};
+
+/**
+ * Creates a memory entry. The caller is responsible for adding it to the
+ * table of allocations.
+ */
+static struct memory_entry *create_memory_entry(const char *func_str, int line_nr)
+{
+ struct memory_entry *entry;
+
+ entry = kzalloc(sizeof(struct memory_entry), GFP_KERNEL);
+ if (NULL == entry)
+ {
+ return NULL;
+ }
+
+ entry->allocated_from_func = func_str;
+ entry->allocated_from_line = line_nr;
+
+ return entry;
+}
+
+static struct memory_entry *find_memory_entry(void *os_ptr)
+{
+ struct memory_entry *entry = NULL;
+ struct memory_entry *ptr;
+
+ if (NULL == os_ptr)
+ {
+ return NULL;
+ }
+
+ hash_for_each_possible(memory_entries, ptr, hash, (long)os_ptr)
+ {
+ if (os_ptr == ptr->os_ptr)
+ {
+ entry = ptr;
+ break;
+ }
+ }
+
+ if (entry == NULL)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Cannot find memory entry: %p", os_ptr);
+ }
+
+ return entry;
+}
+
+static void free_memory_entry(void *os_ptr)
+{
+ struct memory_entry *entry;
+
+ entry = find_memory_entry(os_ptr);
+ if (NULL != entry)
+ {
+ allocated_memory -= entry->size;
+
+ hash_del(&entry->hash);
+ kfree(entry);
+ }
+}
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+void *mve_rsrc_mem_zalloc_track(uint32_t size,
+ uint32_t flags,
+ enum mve_rsrc_mem_allocator allocator,
+ const char *func_str,
+ int line_nr)
+{
+ void *ptr;
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+ if (false != mem_resfail_enabled &&
+ mem_resfail_curr_alloc_nr >= mem_resfail_threshold)
+ {
+ /* Allocation failure simulated */
+ mem_resfail_did_fail = true;
+ __sync_fetch_and_add(&mem_resfail_curr_alloc_nr, 1);
+ return NULL;
+ }
+
+ __sync_fetch_and_add(&mem_resfail_curr_alloc_nr, 1);
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+
+ if (ALLOCATOR_VMALLOC == allocator)
+ {
+ ptr = vmalloc(size);
+ }
+ else if (ALLOCATOR_CACHE == allocator)
+ {
+ ptr = mve_rsrc_mem_cache_alloc(size, flags);
+ }
+ else
+ {
+ ptr = kzalloc(size, flags);
+ }
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ if (NULL != ptr)
+ {
+ struct memory_entry *entry;
+
+ entry = create_memory_entry(func_str, line_nr);
+
+ spin_lock(&entries_lock);
+
+ if (NULL != entry)
+ {
+ entry->os_ptr = ptr;
+ entry->size = size;
+ entry->mem_type = VIRTUAL;
+ entry->sequence = allocation_nr++;
+
+ hash_add(memory_entries, &entry->hash, (long)entry->os_ptr);
+
+ allocated_memory += size;
+ if (allocated_memory > peak_allocated_memory)
+ {
+ peak_allocated_memory = allocated_memory;
+ }
+ }
+
+ spin_unlock(&entries_lock);
+ }
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+ return ptr;
+}
+
+void mve_rsrc_mem_free_track(void *ptr,
+ uint32_t size,
+ enum mve_rsrc_mem_allocator allocator,
+ const char *func_str,
+ int line_nr)
+{
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ spin_lock(&entries_lock);
+ free_memory_entry(ptr);
+ spin_unlock(&entries_lock);
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+ if (ALLOCATOR_VMALLOC == allocator)
+ {
+ vfree(ptr);
+ }
+ else if (ALLOCATOR_CACHE == allocator)
+ {
+ mve_rsrc_mem_cache_free(ptr, size);
+ }
+ else
+ {
+ kfree(ptr);
+ }
+}
+
+phys_addr_t mve_rsrc_mem_alloc_page_track(const char *func_str, int line_nr)
+{
+ phys_addr_t paddr;
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+ if (false != mem_resfail_enabled &&
+ mem_resfail_curr_alloc_nr >= mem_resfail_threshold)
+ {
+ /* Allocation failure simulated */
+ mem_resfail_did_fail = true;
+ __sync_fetch_and_add(&mem_resfail_curr_alloc_nr, 1);
+ return 0;
+ }
+
+ __sync_fetch_and_add(&mem_resfail_curr_alloc_nr, 1);
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+
+ paddr = mve_rsrc_mem_alloc_page();
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ if (0 != paddr)
+ {
+ struct memory_entry *entry;
+
+ entry = create_memory_entry(func_str, line_nr);
+
+ spin_lock(&entries_lock);
+
+ if (NULL != entry)
+ {
+ entry->os_ptr = (void *)paddr;
+ entry->size = PAGE_SIZE;
+ entry->mem_type = PHYSICAL;
+ entry->sequence = allocation_nr++;
+
+ hash_add(memory_entries, &entry->hash, (long)entry->os_ptr);
+
+ allocated_memory += entry->size;
+ if (allocated_memory > peak_allocated_memory)
+ {
+ peak_allocated_memory = allocated_memory;
+ }
+ }
+
+ spin_unlock(&entries_lock);
+ }
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+ return paddr;
+}
+
+void mve_rsrc_mem_free_page_track(phys_addr_t paddr, const char *func_str, int line_nr)
+{
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ spin_lock(&entries_lock);
+ free_memory_entry((void *)paddr);
+ spin_unlock(&entries_lock);
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+ mve_rsrc_mem_free_page(paddr);
+}
+
+phys_addr_t *mve_rsrc_mem_alloc_pages_track(uint32_t nr_pages,
+ const char *func_str,
+ int line_nr)
+{
+ phys_addr_t *paddrs;
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+ if (false != mem_resfail_enabled &&
+ mem_resfail_curr_alloc_nr >= mem_resfail_threshold)
+ {
+ /* Allocation failure simulated */
+ mem_resfail_did_fail = true;
+ __sync_fetch_and_add(&mem_resfail_curr_alloc_nr, 1);
+ return NULL;
+ }
+
+ __sync_fetch_and_add(&mem_resfail_curr_alloc_nr, 1);
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+
+ paddrs = mve_rsrc_mem_alloc_pages(nr_pages);
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ if (NULL != paddrs)
+ {
+ int i;
+ struct memory_entry **entries;
+
+ entries = kzalloc(sizeof(struct memory_entry *) * nr_pages, GFP_KERNEL);
+ if (NULL == entries)
+ {
+ return paddrs;
+ }
+
+ for (i = 0; i < nr_pages; ++i)
+ {
+ entries[i] = create_memory_entry(func_str, line_nr);
+ }
+
+ spin_lock(&entries_lock);
+ for (i = 0; i < nr_pages; ++i)
+ {
+ struct memory_entry *entry = entries[i];
+
+ if (NULL != entry)
+ {
+ entry->os_ptr = (void *)paddrs[i];
+ entry->size = PAGE_SIZE;
+ entry->mem_type = PHYSICAL;
+ entry->sequence = allocation_nr;
+
+ hash_add(memory_entries, &entry->hash, (long)entry->os_ptr);
+
+ allocated_memory += PAGE_SIZE;
+ }
+ }
+
+ allocation_nr++;
+
+ if (allocated_memory > peak_allocated_memory)
+ {
+ peak_allocated_memory = allocated_memory;
+ }
+
+ spin_unlock(&entries_lock);
+
+ kfree(entries);
+ }
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+ return paddrs;
+}
+
+void mve_rsrc_mem_free_pages_track(phys_addr_t *paddrs,
+ uint32_t nr_pages,
+ const char *func_str,
+ int line_nr)
+{
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ spin_lock(&entries_lock);
+ if (NULL != paddrs)
+ {
+ int i;
+ for (i = 0; i < nr_pages; ++i)
+ {
+ free_memory_entry((void *)paddrs[i]);
+ }
+ }
+ spin_unlock(&entries_lock);
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+ mve_rsrc_mem_free_pages(paddrs, nr_pages);
+}
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+struct print_entry
+{
+ uint32_t size; /**< Size of the allocation */
+ uint32_t nr;
+ unsigned int sequence; /**< Allocation sequence */
+ char const *func; /**< String describing in which function
+ * the allocation was made */
+ unsigned int line; /**< At which line was the allocation made */
+ struct list_head list; /**< print entry list */
+};
+
+static struct print_entry *find_print_entry(struct list_head *entries, unsigned int sequence, bool *match)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, entries)
+ {
+ struct print_entry *pe = container_of(pos, struct print_entry, list);
+ if (pe->sequence >= sequence)
+ {
+ if (pe->sequence == sequence)
+ {
+ *match = true;
+ }
+ return pe;
+ }
+ }
+ return NULL;
+}
+
+static void register_print(struct list_head *entries, struct memory_entry *entry)
+{
+ struct print_entry *ptr;
+ bool match = false;
+
+ ptr = find_print_entry(entries, entry->sequence, &match);
+ if (NULL != ptr && false != match)
+ {
+ ptr->nr++;
+ return;
+ }
+ else if (ptr)
+ {
+ entries = &ptr->list;
+ }
+
+ ptr = kzalloc(sizeof(struct print_entry), GFP_ATOMIC);
+ if (NULL != ptr)
+ {
+ INIT_LIST_HEAD(&ptr->list);
+ ptr->size = entry->size;
+ ptr->nr = 1;
+ ptr->sequence = entry->sequence;
+ ptr->func = entry->allocated_from_func;
+ ptr->line = entry->allocated_from_line;
+ list_add_tail(&ptr->list, entries);
+ }
+}
+
+ssize_t mve_rsrc_mem_print_stack(char *buf)
+{
+ struct memory_entry *entry;
+ unsigned long bkt;
+ ssize_t num = 0;
+ struct list_head *pos;
+ LIST_HEAD(print_entries);
+
+ spin_lock(&entries_lock);
+
+#ifdef EMULATOR
+ down_interruptible(&watchdog_sem);
+#endif /* EMULATOR */
+
+ num += snprintf(buf + num, PAGE_SIZE - num, "Currently allocated memory (%d kB, peak: %d kB):\n",
+ allocated_memory / 1024, peak_allocated_memory / 1024);
+
+ hash_for_each(memory_entries, bkt, entry, hash)
+ {
+ register_print(&print_entries, entry);
+ }
+
+ list_for_each_prev(pos, &print_entries)
+ {
+ struct print_entry *ptr = container_of(pos, struct print_entry, list);
+ num += snprintf(buf + num, PAGE_SIZE - num - 1, "[%d] %s:%d (%d bytes)\n", ptr->sequence,
+ ptr->func,
+ ptr->line,
+ ptr->size * ptr->nr);
+ if (num >= PAGE_SIZE)
+ {
+ buf[PAGE_SIZE - 2] = '\0';
+ num = PAGE_SIZE - 1;
+ break;
+ }
+ }
+
+ while (!list_empty(&print_entries))
+ {
+ struct print_entry *ptr = list_first_entry(&print_entries, struct print_entry, list);
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+
+#ifdef EMULATOR
+ up(&watchdog_sem);
+#endif /* EMULATOR */
+ spin_unlock(&entries_lock);
+
+ return num;
+}
+
+void mve_rsrc_mem_clear_stack(void)
+{
+ struct memory_entry *ptr;
+ struct hlist_node *tmp;
+ unsigned long bkt;
+
+ spin_lock(&entries_lock);
+ /* Clean up the book keeping structure */
+ hash_for_each_safe(memory_entries, bkt, tmp, ptr, hash)
+ {
+ allocated_memory -= ptr->size;
+
+ hash_del(&ptr->hash);
+ kfree(ptr);
+ }
+ spin_unlock(&entries_lock);
+}
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+EXPORT_SYMBOL(mve_rsrc_mem_zalloc_track);
+EXPORT_SYMBOL(mve_rsrc_mem_free_track);
+EXPORT_SYMBOL(mve_rsrc_mem_alloc_page_track);
+EXPORT_SYMBOL(mve_rsrc_mem_free_page_track);
+EXPORT_SYMBOL(mve_rsrc_mem_alloc_pages_track);
+EXPORT_SYMBOL(mve_rsrc_mem_free_pages_track);
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+void mve_rsrc_mem_resfail_enable(bool enable)
+{
+ mem_resfail_enabled = enable;
+
+ mem_resfail_curr_alloc_nr = 0;
+ mem_resfail_did_fail = false;
+}
+
+void mve_rsrc_mem_resfail_set_range(uint32_t min, uint32_t max)
+{
+ mem_resfail_threshold = min;
+ (void)max;
+
+ mem_resfail_curr_alloc_nr = 0;
+ mem_resfail_did_fail = false;
+}
+
+uint32_t mve_rsrc_mem_resfail_did_fail(void)
+{
+ return mem_resfail_did_fail == false ? 0 : 1;
+}
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+
+#ifdef CONFIG_SYSFS
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+static ssize_t sysfs_print_memory_table(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return mve_rsrc_mem_print_stack(buf);
+}
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+static ssize_t sysfs_read_mem_resfail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mem_resfail_did_fail == false ? 0 : 1);
+}
+
+static ssize_t sysfs_write_mem_resfail(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ /* Use sysfs_streq instead of strcmp since the string written from user
+ * space typically ends with a newline */
+ if (sysfs_streq(buf, "enable"))
+ {
+ mve_rsrc_mem_resfail_enable(true);
+ }
+ else if (sysfs_streq(buf, "disable"))
+ {
+ mve_rsrc_mem_resfail_enable(false);
+ }
+ else
+ {
+ int min, max;
+
+ if (2 != sscanf(buf, "%d %d", &min, &max))
+ {
+ return -EINVAL;
+ }
+
+ mve_rsrc_mem_resfail_set_range(min, max);
+ }
+
+ return count;
+}
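+
+/*
+ * Example (from a shell; the exact sysfs path depends on where the device
+ * is bound):
+ *
+ * echo enable > /sys/.../mem_resfail (start failure simulation)
+ * echo "5 10" > /sys/.../mem_resfail (fail from allocation number 5)
+ * cat /sys/.../mem_resfail (prints 1 if a failure was simulated)
+ * echo disable > /sys/.../mem_resfail (stop failure simulation)
+ */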
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+
+static struct device_attribute sysfs_files[] =
+{
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ __ATTR(memory_table, S_IRUGO, sysfs_print_memory_table, NULL),
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+#if (1 == MVE_MEM_DBG_RESFAIL)
+ __ATTR(mem_resfail, S_IRUGO | S_IWUSR, sysfs_read_mem_resfail, sysfs_write_mem_resfail)
+#endif /* (1 == MVE_MEM_DBG_RESFAIL) */
+};
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* (1 == MVE_MEM_DBG_SUPPORT) */
+
+void mve_rsrc_mem_init(struct device *dev)
+{
+#if (1 == MVE_MEM_DBG_SUPPORT)
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ spin_lock_init(&entries_lock);
+ hash_init(memory_entries);
+
+#ifdef EMULATOR
+ sema_init(&watchdog_sem, 1);
+#endif /* EMULATOR */
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+#ifdef CONFIG_SYSFS
+ {
+ int i;
+
+ for (i = 0; i < NELEMS(sysfs_files); ++i)
+ {
+ int err = device_create_file(dev, &sysfs_files[i]);
+ if (err < 0)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to create sysfs file.");
+ }
+ }
+ }
+#endif /* CONFIG_SYSFS */
+
+#endif /* (1 == MVE_MEM_DBG_SUPPORT) */
+
+ mve_rsrc_mem_cache_init();
+}
+
+void mve_rsrc_mem_deinit(struct device *dev)
+{
+#if (1 == MVE_MEM_DBG_SUPPORT)
+#ifndef EMULATOR
+ int i;
+
+ for (i = 0; i < NELEMS(sysfs_files); ++i)
+ {
+ device_remove_file(dev, &sysfs_files[i]);
+ }
+#else /* #ifndef EMULATOR */
+
+#if (1 == MVE_MEM_DBG_TRACKMEM)
+ if (0 < allocated_memory)
+ {
+ char buf[4096] = "";
+ mve_rsrc_mem_print_stack(buf);
+ printk(KERN_ERR "%s\n", buf);
+ mve_rsrc_mem_clear_stack();
+ }
+ hash_deinit(memory_entries);
+#endif /* (1 == MVE_MEM_DBG_TRACKMEM) */
+
+#endif /* #ifndef EMULATOR */
+#endif /* (1 == MVE_MEM_DBG_SUPPORT) */
+
+ mve_rsrc_mem_cache_deinit();
+}
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.h b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.h
new file mode 100644
index 000000000000..a1733b6c6242
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_mem_frontend.h
@@ -0,0 +1,217 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_MEM_FRONTEND_H
+#define MVE_RSRC_MEM_FRONTEND_H
+
+#ifndef __KERNEL__
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#endif
+
+#include "mve_rsrc_mem_backend.h"
+#include "mve_rsrc_mem_cache.h"
+
+#if (1 == MVE_MEM_DBG_SUPPORT)
+
+enum mve_rsrc_mem_allocator {ALLOCATOR_KMALLOC, ALLOCATOR_VMALLOC, ALLOCATOR_CACHE};
+
+#define MVE_RSRC_MEM_CACHE_ALLOC(size, flags) \
+ mve_rsrc_mem_zalloc_track(size, flags, ALLOCATOR_CACHE, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_CACHE_FREE(ptr, size) \
+ mve_rsrc_mem_free_track(ptr, size, ALLOCATOR_CACHE, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_ZALLOC(size, flags) \
+ mve_rsrc_mem_zalloc_track(size, flags, ALLOCATOR_KMALLOC, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_FREE(ptr) \
+ mve_rsrc_mem_free_track(ptr, 0, ALLOCATOR_KMALLOC, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_VALLOC(size) \
+ mve_rsrc_mem_zalloc_track(size, 0, ALLOCATOR_VMALLOC, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_VFREE(ptr) \
+ mve_rsrc_mem_free_track(ptr, 0, ALLOCATOR_VMALLOC, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_ALLOC_PAGE() \
+ mve_rsrc_mem_alloc_page_track(__FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_FREE_PAGE(paddr) \
+ mve_rsrc_mem_free_page_track(paddr, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_ALLOC_PAGES(nr_pages) \
+ mve_rsrc_mem_alloc_pages_track(nr_pages, __FUNCTION__, __LINE__)
+
+#define MVE_RSRC_MEM_FREE_PAGES(paddrs, nr_pages) \
+ mve_rsrc_mem_free_pages_track(paddrs, nr_pages, __FUNCTION__, __LINE__)
+
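+/*
+ * Example (illustrative only; struct foo is a placeholder type): the macros
+ * above are drop-in replacements for kzalloc/kfree and friends. The call
+ * site stays unchanged while the tracker records the function and line of
+ * every allocation:
+ *
+ * struct foo *f = MVE_RSRC_MEM_ZALLOC(sizeof(struct foo), GFP_KERNEL);
+ * if (NULL == f)
+ * {
+ * return -ENOMEM;
+ * }
+ * ...
+ * MVE_RSRC_MEM_FREE(f);
+ */
+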
+/**
+ * Allocate heap memory using the supplied allocator and add the allocation to the memory tracking table.
+ * @param size Size in bytes of the allocation.
+ * @param flags Allocation flags. See the documentation of kzalloc.
+ * @param allocator Which allocator to use when allocating the memory.
+ * @param func_str Function name of the allocation origin.
+ * @param line_nr Line number of the allocation origin.
+ * @return Virtual address of the allocation on success, NULL on failure.
+ */
+void *mve_rsrc_mem_zalloc_track(uint32_t size,
+ uint32_t flags,
+ enum mve_rsrc_mem_allocator allocator,
+ const char *func_str,
+ int line_nr);
+
+/**
+ * Free memory allocated with mve_rsrc_mem_zalloc_track and remove the allocation
+ * from the memory tracking table.
+ * @param ptr Pointer to the memory to free.
+ * @param size Size of the corresponding allocation; only used by the cache allocator.
+ * @param allocator The allocator that was used to allocate the memory.
+ * @param func_str Function name of the free origin.
+ * @param line_nr Line number of the free origin.
+ */
+void mve_rsrc_mem_free_track(void *ptr,
+ uint32_t size,
+ enum mve_rsrc_mem_allocator allocator,
+ const char *func_str,
+ int line_nr);
+
+/**
+ * Allocate one page of physical memory and add it to the memory tracking table
+ * @param func_str Function name of the allocation origin.
+ * @param line_nr Line number of the allocation origin.
+ * @return Physical address of the allocated page.
+ */
+phys_addr_t mve_rsrc_mem_alloc_page_track(const char *func_str, int line_nr);
+
+/**
+ * Free one page of physical memory and remove it from the memory tracking table.
+ * @param paddr Physical address of the page to free.
+ * @param func_str Function name of the free origin.
+ * @param line_nr Line number of the free origin.
+ */
+void mve_rsrc_mem_free_page_track(phys_addr_t paddr, const char *func_str, int line_nr);
+
+/**
+ * Allocate a set of pages and add the allocations to the memory tracking table.
+ * The client must free the returned array with MVE_RSRC_MEM_FREE or there will
+ * be a memory leak.
+ * @param nr_pages Number of pages to allocate.
+ * @param func_str Function name of the allocation origin.
+ * @param line_nr Line number of the allocation origin.
+ * @return An array of pages on success, NULL on failure. Note that the client
+ * must free the returned array to prevent memory leaks.
+ */
+phys_addr_t *mve_rsrc_mem_alloc_pages_track(uint32_t nr_pages,
+ const char *func_str,
+ int line_nr);
+
+/**
+ * Free a set of pages and remove them from the memory tracking table.
+ * The supplied array is freed with MVE_RSRC_MEM_FREE so there is no need for
+ * the client to free this memory.
+ * @param paddrs Array containing pages to free.
+ * @param nr_pages Number of pages in the paddrs array.
+ * @param func_str Function name of the free origin.
+ * @param line_nr Line number of the free origin.
+ */
+void mve_rsrc_mem_free_pages_track(phys_addr_t *paddrs,
+ uint32_t nr_pages,
+ const char *func_str,
+ int line_nr);
+
+/**
+ * Print the contents of the memory allocation table into the supplied string
+ * array.
+ * @param buf The string to put the data in.
+ * @return The number of characters copied to the array.
+ */
+ssize_t mve_rsrc_mem_print_stack(char *buf);
+
+/**
+ * Clear the contents of the memory allocation table.
+ */
+void mve_rsrc_mem_clear_stack(void);
+
+/**
+ * Enable/disable memory allocation failure simulation
+ * @param enable True to enable, false to disable.
+ */
+void mve_rsrc_mem_resfail_enable(bool enable);
+
+/**
+ * Set the range for the allocations that shall fail.
+ * @param min Start of the range.
+ * @param max End of the range. Note that this argument is ignored in the
+ * current implementation.
+ */
+void mve_rsrc_mem_resfail_set_range(uint32_t min, uint32_t max);
+
+/**
+ * Has a memory allocation failure been simulated since the last reset?
+ * @return 1 if a failure has been simulated, 0 if not.
+ */
+uint32_t mve_rsrc_mem_resfail_did_fail(void);
+
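+/*
+ * Example (illustrative test loop; run_testcase is a hypothetical helper):
+ * simulate an allocation failure at every allocation index in turn and
+ * verify that the code under test survives. Note that the did_fail flag
+ * must be sampled before mve_rsrc_mem_resfail_enable resets it.
+ *
+ * uint32_t nr = 0;
+ * bool failed;
+ *
+ * do
+ * {
+ * mve_rsrc_mem_resfail_set_range(nr, nr);
+ * mve_rsrc_mem_resfail_enable(true);
+ * run_testcase();
+ * failed = (0 != mve_rsrc_mem_resfail_did_fail());
+ * mve_rsrc_mem_resfail_enable(false);
+ * nr++;
+ * }
+ * while (false != failed);
+ */
+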
+#else
+
+/* Allocate memory using the driver's internal allocation cache */
+#define MVE_RSRC_MEM_CACHE_ALLOC(size, flags) \
+ mve_rsrc_mem_cache_alloc(size, flags)
+
+#define MVE_RSRC_MEM_CACHE_FREE(ptr, size) \
+ mve_rsrc_mem_cache_free(ptr, size)
+
+/* Allocate physically contiguous memory using kzalloc */
+#define MVE_RSRC_MEM_ZALLOC(size, flags) \
+ kzalloc(size, flags)
+
+#define MVE_RSRC_MEM_FREE(ptr) \
+ kfree(ptr)
+
+/* Allocate non physically contiguous memory using vmalloc */
+#define MVE_RSRC_MEM_VALLOC(size) \
+ vmalloc(size)
+
+#define MVE_RSRC_MEM_VFREE(ptr) \
+ vfree(ptr)
+
+#define MVE_RSRC_MEM_ALLOC_PAGE() \
+ mve_rsrc_mem_alloc_page()
+
+#define MVE_RSRC_MEM_FREE_PAGE(paddr) \
+ mve_rsrc_mem_free_page(paddr)
+
+#define MVE_RSRC_MEM_ALLOC_PAGES(nr_pages) \
+ mve_rsrc_mem_alloc_pages(nr_pages)
+
+#define MVE_RSRC_MEM_FREE_PAGES(paddrs, nr_pages) \
+ mve_rsrc_mem_free_pages(paddrs, nr_pages)
+
+#endif /* (1 == MVE_MEM_DBG_SUPPORT) */
+
+/**
+ * Performs all necessary initialization of the memory frontend.
+ */
+void mve_rsrc_mem_init(struct device *dev);
+
+/**
+ * Performs all necessary deinitialization of the memory frontend.
+ */
+void mve_rsrc_mem_deinit(struct device *dev);
+
+#endif /* MVE_RSRC_MEM_FRONTEND_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_pm.c b/drivers/video/arm/v5xx/resource/mve_rsrc_pm.c
new file mode 100644
index 000000000000..ec712b601307
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_pm.c
@@ -0,0 +1,275 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include "linux/export.h"
+#include "linux/pm_runtime.h"
+#include "linux/slab.h"
+#include "linux/device.h"
+#include <linux/stat.h>
+#endif
+
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_pm.h"
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_irq.h"
+
+#include "machine/mve_power_management.h"
+#include "machine/mve_dvfs.h"
+#include "machine/mve_config.h"
+
+#ifdef EMULATOR
+static uint32_t vpu_freq = 20;
+#endif
+
+void mver_pm_request_frequency(uint32_t freq)
+{
+#ifdef EMULATOR
+ vpu_freq = freq;
+#else
+ struct mve_dvfs_callback_conf *dvfs_conf;
+ dvfs_conf = (struct mve_dvfs_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS);
+ if (NULL == dvfs_conf || NULL == dvfs_conf->set_rate)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to change the frequency.");
+ }
+ else
+ {
+ dvfs_conf->set_rate(freq);
+ }
+#endif
+}
+
+uint32_t mver_pm_read_frequency(void)
+{
+#ifdef EMULATOR
+ return vpu_freq;
+#else
+ uint32_t freq = 0;
+ struct mve_dvfs_callback_conf *dvfs_conf;
+
+ dvfs_conf = (struct mve_dvfs_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS);
+ if (NULL == dvfs_conf || NULL == dvfs_conf->get_rate)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to read the frequency.");
+ freq = 0;
+ }
+ else
+ {
+ freq = dvfs_conf->get_rate();
+ }
+
+ return freq;
+#endif
+}
+
+uint32_t mver_pm_read_max_frequency(void)
+{
+#ifdef EMULATOR
+ return 100;
+#else
+ uint32_t freq = 0;
+ struct mve_dvfs_callback_conf *dvfs_conf;
+
+ dvfs_conf = (struct mve_dvfs_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS);
+ if (NULL == dvfs_conf || NULL == dvfs_conf->get_max_rate)
+ {
+ printk(KERN_ERR "[MVE] Unable to read max frequency\n");
+ freq = 0;
+ }
+ else
+ {
+ freq = dvfs_conf->get_max_rate();
+ }
+
+ return freq;
+#endif
+}
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+
+static ssize_t sysfs_read_vpu_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ uint32_t freq;
+
+ freq = mver_pm_read_frequency();
+ return snprintf(buf, PAGE_SIZE, "%d\n", (int)freq);
+}
+
+static ssize_t sysfs_write_vpu_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int freq;
+
+ freq = simple_strtol(buf, NULL, 10);
+ if (0 != freq)
+ {
+ mver_pm_request_frequency(freq);
+ }
+
+ return count;
+}
+
+static struct device_attribute sysfs_files[] =
+{
+ __ATTR(vpu_freq, S_IRUGO | S_IWUSR, sysfs_read_vpu_freq, sysfs_write_vpu_freq)
+};
+
+#endif /* CONFIG_SYSFS && _DEBUG */
+
+void mver_pm_init(struct device *dev)
+{
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ device_create_file(dev, &sysfs_files[0]);
+#endif
+}
+
+void mver_pm_deinit(struct device *dev)
+{
+#ifndef EMULATOR
+ struct mve_dvfs_callback_conf *dvfs_conf;
+ dvfs_conf = (struct mve_dvfs_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS);
+ if (NULL == dvfs_conf || NULL == dvfs_conf->stop)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "no dvfs_conf or stop.");
+ }
+ else
+ {
+ dvfs_conf->stop();
+ }
+#endif
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ device_remove_file(dev, &sysfs_files[0]);
+#endif
+}
+
+int mver_pm_request_suspend(void)
+{
+ mver_irq_disable();
+ return pm_runtime_put_sync(&mve_rsrc_data.pdev->dev);
+}
+
+int mver_pm_request_resume(void)
+{
+ int ret, val;
+
+ ret = pm_runtime_get_sync(&mve_rsrc_data.pdev->dev);
+ val = mver_irq_enable();
+ if (0 != val)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log, MVE_LOG_ERROR, "Unable to register interrupt handler: \'%d\'", val);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(mver_pm_request_frequency);
+EXPORT_SYMBOL(mver_pm_read_frequency);
+EXPORT_SYMBOL(mver_pm_request_suspend);
+EXPORT_SYMBOL(mver_pm_request_resume);
+
+int mver_pm_poweron(void)
+{
+#ifndef EMULATOR
+ struct mve_pm_callback_conf *pm_conf;
+ struct mve_dvfs_callback_conf *dvfs_conf;
+
+ /* Enable clock and power to the hardware */
+ pm_conf = (struct mve_pm_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_POWER_CALLBACKS);
+ dvfs_conf = (struct mve_dvfs_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS);
+ if (NULL == pm_conf || NULL == dvfs_conf)
+ {
+ return -ENXIO;
+ }
+
+ dvfs_conf->enable_clock();
+ pm_conf->power_on_callback();
+
+ /* Resume suspended sessions */
+ mver_scheduler_resume();
+#endif
+ return 0;
+}
+
+int mver_pm_poweroff(void)
+{
+#ifndef EMULATOR
+ struct mve_pm_callback_conf *pm_conf;
+ struct mve_dvfs_callback_conf *dvfs_conf;
+
+ /* Suspend all running sessions */
+ mver_scheduler_suspend();
+
+ /* Shut off clock and power to the hardware */
+ pm_conf = (struct mve_pm_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_POWER_CALLBACKS);
+ dvfs_conf = (struct mve_dvfs_callback_conf *)mve_config_get_value(mve_rsrc_data.config,
+ MVE_CONFIG_DEVICE_ATTR_DVFS_CALLBACKS);
+ if (NULL == pm_conf || NULL == dvfs_conf)
+ {
+ return -ENXIO;
+ }
+
+ pm_conf->power_off_callback();
+ dvfs_conf->disable_clock();
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int mver_pm_suspend(struct device *dev)
+{
+ mver_pm_poweroff();
+
+ return 0;
+}
+
+int mver_pm_resume(struct device *dev)
+{
+ mver_pm_poweron();
+
+ return 0;
+}
+
+int mver_pm_runtime_suspend(struct device *dev)
+{
+ return mver_pm_suspend(dev);
+}
+
+int mver_pm_runtime_resume(struct device *dev)
+{
+ return mver_pm_resume(dev);
+}
+
+int mver_pm_runtime_idle(struct device *dev)
+{
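+ /* Suspend immediately and return nonzero so that the runtime PM core
+ * does not additionally attempt a suspend of its own. */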
+ pm_runtime_suspend(dev);
+ return 1;
+}
+#endif
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_pm.h b/drivers/video/arm/v5xx/resource/mve_rsrc_pm.h
new file mode 100644
index 000000000000..b51d0919fa0a
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_pm.h
@@ -0,0 +1,121 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_PM_H
+#define MVE_RSRC_PM_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/device.h>
+#endif
+
+/**
+ * Notify the Linux kernel runtime PM framework that clock and power must be
+ * disabled for the hardware. This symbol is exported to the base module.
+ *
+ * @return 0 on success. Other values indicate failure.
+ */
+int mver_pm_request_suspend(void);
+
+/**
+ * Notify the Linux kernel runtime PM framework that clock and power must be
+ * enabled for the hardware. This symbol is exported to the base module.
+ *
+ * @return 0 on success. Other values indicate failure.
+ */
+int mver_pm_request_resume(void);
+
+/**
+ * Initialize the PM module.
+ */
+void mver_pm_init(struct device *dev);
+
+/**
+ * Deinitialize the PM module, e.g. remove sysfs files.
+ */
+void mver_pm_deinit(struct device *dev);
+
+/**
+ * Enable power to the hardware.
+ */
+int mver_pm_poweron(void);
+
+/**
+ * Disable power to the hardware.
+ */
+int mver_pm_poweroff(void);
+
+/* The following functions must not be called directly from the driver code!
+ * These are callback functions called by the Linux kernel */
+
+/**
+ * Linux kernel power management callback function. Not to be called directly
+ * from the driver!
+ *
+ * This function stops all running sessions and saves their session memory.
+ * No new sessions are allowed to start after this function has been called
+ * (until mver_pm_resume has been called).
+ */
+int mver_pm_suspend(struct device *dev);
+
+/**
+ * Linux kernel power management callback function. Not to be called directly
+ * from the driver!
+ *
+ * Bring the hardware back to a functional state, i.e. undo the operations
+ * performed by mver_pm_suspend.
+ */
+int mver_pm_resume(struct device *dev);
+
+/**
+ * Linux kernel power management callback function. Not to be called directly
+ * from the driver!
+ */
+int mver_pm_runtime_suspend(struct device *dev);
+
+/**
+ * Linux kernel power management callback function. Not to be called directly
+ * from the driver!
+ */
+int mver_pm_runtime_resume(struct device *dev);
+
+/**
+ * Linux kernel power management callback function. Not to be called directly
+ * from the driver!
+ */
+int mver_pm_runtime_idle(struct device *dev);
+
+/**
+ * Request a specific hardware clock frequency.
+ *
+ * @param freq The requested clock frequency
+ */
+void mver_pm_request_frequency(uint32_t freq);
+
+/**
+ * Read the current hardware clock frequency.
+ *
+ * @return The current clock frequency
+ */
+uint32_t mver_pm_read_frequency(void);
+
+/**
+ * Read the maximum supported hardware clock frequency.
+ *
+ * @return The maximum supported clock frequency
+ */
+uint32_t mver_pm_read_max_frequency(void);
+
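+/*
+ * Example (illustrative): code that touches hardware registers outside of a
+ * scheduled session must bracket the access with a resume/suspend pair so
+ * that the runtime PM reference count stays balanced:
+ *
+ * mver_pm_request_resume();
+ * ... read or write hardware registers ...
+ * mver_pm_request_suspend();
+ */
+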
+#endif /* MVE_RSRC_PM_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_register.c b/drivers/video/arm/v5xx/resource/mve_rsrc_register.c
new file mode 100644
index 000000000000..52d398d434da
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_register.c
@@ -0,0 +1,95 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_rsrc_register.h"
+#include "mve_rsrc_driver.h"
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <asm/io.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#endif
+
+static spinlock_t reg_bank_lock;
+
+void mver_reg_init(void)
+{
+ spin_lock_init(&reg_bank_lock);
+}
+
+void mver_reg_write32(volatile uint32_t *addr, uint32_t value)
+{
+ if (true == mve_rsrc_data.hw_interaction)
+ {
+ writel(value, addr);
+ }
+}
+
+uint32_t mver_reg_read32(volatile uint32_t *addr)
+{
+ if (true == mve_rsrc_data.hw_interaction)
+ {
+ return readl(addr);
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+tCS *mver_reg_get_coresched_bank(void)
+{
+ spin_lock(&reg_bank_lock);
+ return (tCS *)mve_rsrc_data.regs;
+}
+
+void mver_reg_put_coresched_bank(tCS **regs)
+{
+ *regs = NULL;
+ spin_unlock(&reg_bank_lock);
+}
+
+tCS *mver_reg_get_coresched_bank_irq(void)
+{
+ return (tCS *)mve_rsrc_data.regs;
+}
+
+uint32_t mver_reg_get_fuse(void)
+{
+ return mve_rsrc_data.fuse;
+}
+
+uint32_t mver_reg_get_version(void)
+{
+ return mve_rsrc_data.hw_version;
+}
+
+#ifdef UNIT
+
+void mver_driver_set_hw_interaction(bool enable)
+{
+ mve_rsrc_data.hw_interaction = enable;
+}
+EXPORT_SYMBOL(mver_driver_set_hw_interaction);
+
+#endif
+
+EXPORT_SYMBOL(mver_reg_write32);
+EXPORT_SYMBOL(mver_reg_read32);
+EXPORT_SYMBOL(mver_reg_get_coresched_bank);
+EXPORT_SYMBOL(mver_reg_put_coresched_bank);
+EXPORT_SYMBOL(mver_reg_get_fuse);
+EXPORT_SYMBOL(mver_reg_get_version);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_register.h b/drivers/video/arm/v5xx/resource/mve_rsrc_register.h
new file mode 100644
index 000000000000..34be6d7ea6ee
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_register.h
@@ -0,0 +1,96 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_REGISTER_H
+#define MVE_RSRC_REGISTER_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#endif
+
+#define MVE_GORM 0x56500000
+#define MVE_AUDA 0x56550000
+#define MVE_EGIL 0x56610000
+
+typedef volatile uint32_t REG32;
+
+#include <host_interface_v1/mve_coresched_reg.h>
+
+/**
+ * Initializes this module. Must be called before attempting to write to
+ * any registers.
+ */
+void mver_reg_init(void);
+
+/**
+ * Write a 32-bit value to a MVE hardware register.
+ * @param addr Address of the register.
+ * @param value Value to write.
+ */
+void mver_reg_write32(volatile uint32_t *addr, uint32_t value);
+
+/**
+ * Read a 32-bit value from a MVE hardware register.
+ * @param addr Address of the register.
+ * @return Value read from the register.
+ */
+uint32_t mver_reg_read32(volatile uint32_t *addr);
+
+/**
+ * Returns the base address of the core scheduler register bank. Needs
+ * to be called before writing to any registers since it will acquire a
+ * spinlock that prevents other clients from writing at the same time.
+ * @return The register bank address.
+ */
+tCS *mver_reg_get_coresched_bank(void);
+
+/**
+ * This function must be called when a client is done writing to the registers.
+ * Releases the spinlock taken by mver_reg_get_coresched_bank.
+ */
+void mver_reg_put_coresched_bank(tCS **regs);
+
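+/*
+ * Example (illustrative): every register access is bracketed by the get/put
+ * pair so that clients never race on the shared bank. Note that the put
+ * call also NULLs the caller's pointer:
+ *
+ * tCS *regs;
+ * uint32_t version;
+ *
+ * regs = mver_reg_get_coresched_bank();
+ * version = mver_reg_read32(&regs->VERSION);
+ * mver_reg_put_coresched_bank(&regs);
+ */
+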
+/**
+ * Returns the base address of the core scheduler register bank without
+ * acquiring the spinlock that guards against concurrent register access.
+ * It must therefore only be called from IRQ context, where taking the
+ * spinlock could deadlock.
+ * @return The register bank address.
+ */
+tCS *mver_reg_get_coresched_bank_irq(void);
+
+/**
+ * Returns the cached MVE fuse register value.
+ * @return The cached fuse value.
+ */
+uint32_t mver_reg_get_fuse(void);
+
+/**
+ * Returns the hardware version.
+ * @return The hardware version.
+ */
+uint32_t mver_reg_get_version(void);
+
+#ifdef UNIT
+
+/**
+ * Enable/disable all communication with the HW.
+ * @param enable True if communication should be enabled, false if disabled.
+ */
+void mver_driver_set_hw_interaction(bool enable);
+
+#endif
+
+#endif /* MVE_RSRC_REGISTER_H */
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.c b/drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.c
new file mode 100644
index 000000000000..a9d0c28c0a35
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.c
@@ -0,0 +1,1548 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "mve_rsrc_scheduler.h"
+#include "mve_rsrc_driver.h"
+#include "mve_rsrc_register.h"
+#include "mve_rsrc_irq.h"
+#include "mve_rsrc_mem_frontend.h"
+#include "mve_rsrc_circular_buffer.h"
+#include "mve_rsrc_log.h"
+#include "mve_rsrc_dvfs.h"
+
+#ifndef EMULATOR
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/sysfs.h>
+#include <linux/semaphore.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#endif
+
+#define MAX_LSID 4 /* Maximum supported LSIDs */
+#define JOBQUEUE_SIZE 4 /* Number of entries in the MVE job queue */
+#define CORESCHED_CORELSID_ENTRY_SIZE 4 /* Number of bits used by each entry in the CORELSID */
+#define CORESCHED_JOBQUEUE_ENTRY_SIZE 8 /* Number of bits used by each entry in the JOBQUEUE */
+
+#define MAX_SESSION_INSTANCES_IN_HW_QUEUE 2
+
+/* When attempting to schedule a new session, if the session is not scheduled
+ * immediately, wait for up to 15 ms before giving up and returning to the caller. */
+#define UNSCHEDULED_WAIT_MS 15
+
+/* When switching out a session, the driver repeatedly reads the CORELSID
+ * register to find out when a LSID is no longer executed. This define
+ * controls how many times this process shall be iterated before giving up. */
+#ifdef EMULATOR
+#define WAIT_FOR_REBOOT_NTRIES 100000
+#define WAIT_FOR_TERMINATE_NTRIES 100000
+#else
+#define WAIT_FOR_REBOOT_NTRIES 100
+#define WAIT_FOR_TERMINATE_NTRIES 1000
+#endif
+
+/* Constant defining an empty HW job queue */
+#define JOBQUEUE_EMPTY ((JOBQUEUE_JOB_INVALID << 24) | (JOBQUEUE_JOB_INVALID << 16) | (JOBQUEUE_JOB_INVALID << 8) | (JOBQUEUE_JOB_INVALID))
+
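+/*
+ * Worked example of the JOBQUEUE encoding: each 8-bit entry holds a LSID in
+ * its low nibble and (number of cores - 1) in its high nibble, packed from
+ * bit 0 upwards. The value 0x0F0F0131 therefore decodes as:
+ *
+ * entry 0 = 0x31: LSID 1 scheduled on 4 cores
+ * entry 1 = 0x01: LSID 1 scheduled on 1 core
+ * entries 2-3 = 0x0F (JOBQUEUE_JOB_INVALID): unused
+ */
+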
+/**
+ * Structure used by the scheduler to keep track of the active sessions.
+ */
+struct mve_session_entry
+{
+ mver_session_id session_id; /**< Base session ID */
+ enum LSID lsid; /**< LSID allocated to the session */
+
+ uint32_t mmu_ctrl; /**< The value to write to the MMU_CTRL register */
+ int ncores; /**< Number of cores this session is allowed to use */
+ bool secure; /**< Session is secure */
+ int enqueues; /**< Number of times this session is enqueued */
+
+ irq_callback_fptr irq_callback; /**< IRQ callback function */
+
+ session_has_work_fptr has_work_callback; /**< Query the session if it can be switched out */
+ session_switchout_fptr switchout_callback; /**< Notify the session that it's about to be switched out */
+ session_switchin_fptr switchin_callback; /**< Notify the session that it's about to be switched in */
+ session_switchout_completed_fptr switchout_complete_callback; /**< Notify session that it has been switched out */
+ session_get_restricting_buffer_count_fptr
+ get_restricting_buffer_count_callback; /**< Get amount of restricting buffers enqueued to the FW */
+
+ struct list_head sessions_list; /**< Sessions linked list */
+ struct semaphore scheduled_sem; /**< Semaphore that signals whether the session is scheduled or not */
+};
+
+/* List of sessions registered with the scheduler */
+static struct list_head sessions;
+
+/* List of sessions that are waiting to be scheduled for execution but are currently
+ * not executing. This is because there are no available LSIDs or hardware job queue entries. */
+static struct mver_circular_buffer *queued_sessions;
+
+/* LSID to session mapping */
+static struct mve_session_entry *session_by_lsid[MAX_LSID] = { NULL };
+
+/* Semaphore used to prevent concurrent access to the scheduler. Note that
+ * this semaphore must not be held while making irq_callback calls. */
+static struct semaphore scheduler_sem;
+
+/* Used by the suspend/resume code to prevent new sessions from being
+ * scheduled when the system is being suspended. */
+static bool scheduling_enabled = true;
+
+/* Indicates when a session couldn't be switched in because all LSIDs are
+ * taken. */
+static bool no_free_lsid = false;
+
+/**
+ * Returns the session instance corresponding to the supplied base session ID.
+ * @param session_id Base ID of the session.
+ * @return The session instance if one exists, NULL otherwise.
+ */
+static struct mve_session_entry *get_session_cache_entry(mver_session_id session_id)
+{
+ struct mve_session_entry *entry = NULL;
+ struct list_head *pos;
+
+ list_for_each(pos, &sessions)
+ {
+ entry = container_of(pos, struct mve_session_entry, sessions_list);
+
+ if (entry->session_id == session_id)
+ {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Disable hardware job scheduling.
+ * @param regs Pointer to register mapping.
+ */
+static void disable_scheduling(tCS *regs)
+{
+ uint32_t val;
+
+ mver_reg_write32(&regs->ENABLE, 0);
+ do
+ {
+ val = mver_reg_read32(&regs->ENABLE);
+ }
+ while (0 != val);
+}
+
+/**
+ * Enable hardware job scheduling.
+ * @param regs Pointer to register mapping.
+ */
+static void enable_scheduling(tCS *regs)
+{
+ wmb();
+ mver_reg_write32(&regs->ENABLE, 1);
+}
+
+/**
+ * Returns the currently executing LSID. Note that this function assumes that
+ * only one LSID can execute at a time which is true for this implementation.
+ * @return The currently executing LSID
+ */
+static enum LSID jobqueue_currently_executing(void)
+{
+ tCS *regs;
+ uint32_t core_lsid;
+ enum LSID lsid = NO_LSID;
+ int ncores, i;
+
+ regs = mver_reg_get_coresched_bank();
+ core_lsid = mver_reg_read32(&regs->CORELSID);
+ mver_reg_put_coresched_bank(&regs);
+
+#ifdef EMULATOR
+ ncores = 8;
+#else
+ ncores = mver_scheduler_get_ncores();
+#endif
+
+ /* Find a LSID that is currently executing */
+ for (i = 0; i < ncores; ++i)
+ {
+ if ((core_lsid & 0xF) != 0xF)
+ {
+ lsid = core_lsid & 0xF;
+ break;
+ }
+
+ core_lsid >>= CORESCHED_CORELSID_ENTRY_SIZE;
+ }
+
+ return lsid;
+}
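+
+/*
+ * Worked example of the CORELSID encoding read above: each core occupies a
+ * 4-bit entry, where 0xF means the core is idle. The value 0xFF2F therefore
+ * means that core 1 is executing LSID 2 while cores 0, 2 and 3 are idle.
+ */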
+
+/**
+ * Check if the supplied session is enqueued in the HW job queue or is currently
+ * executing.
+ * @param session The session to check
+ * @return True if the session can be found in the HW job queue or in the CORELSID.
+ * False if not.
+ */
+static bool session_is_executing(struct mve_session_entry *session)
+{
+ tCS *regs;
+ uint32_t job_queue;
+ uint32_t core_lsid;
+ int i;
+ int ncores;
+
+#ifdef EMULATOR
+ ncores = 8;
+#else
+ ncores = mver_scheduler_get_ncores();
+#endif
+
+ regs = mver_reg_get_coresched_bank();
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+ core_lsid = mver_reg_read32(&regs->CORELSID);
+ mver_reg_put_coresched_bank(&regs);
+
+ /* Is the LSID currently executing? */
+ for (i = 0; i < ncores; ++i)
+ {
+ if (((core_lsid >> (i * CORESCHED_CORELSID_ENTRY_SIZE)) & 0xF) == session->lsid)
+ {
+ return true;
+ }
+ }
+
+ /* Is the LSID present in the HW job queue? */
+ for (i = 0; i < JOBQUEUE_SIZE; ++i)
+ {
+ if (((job_queue >> (i * CORESCHED_JOBQUEUE_ENTRY_SIZE)) & 0x0F) == session->lsid)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Checks whether the session is enqueued to the HW job queue or not.
+ * @param session The session to check.
+ * @return True if the session is enqueued, false if not.
+ */
+static bool jobqueue_is_enqueued(struct mve_session_entry *session)
+{
+ tCS *regs;
+ uint32_t job_queue;
+ int i;
+
+ regs = mver_reg_get_coresched_bank();
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+ mver_reg_put_coresched_bank(&regs);
+
+ /* Is the LSID present in the HW job queue? */
+ for (i = 0; i < JOBQUEUE_SIZE; ++i)
+ {
+ if (((job_queue >> (i * CORESCHED_JOBQUEUE_ENTRY_SIZE)) & 0x0F) == session->lsid)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Check if there are any vacant positions in the HW job queue.
+ * @return True if there is at least one vacant position in the HW job queue.
+ * False otherwise.
+ */
+static bool jobqueue_is_free(void)
+{
+ tCS *regs;
+ uint32_t job_queue;
+ bool is_free;
+
+ regs = mver_reg_get_coresched_bank();
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+ mver_reg_put_coresched_bank(&regs);
+
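+ /* Job queue entries are packed towards bit 0 (see jobqueue_dequeue_lsid),
+ * so it is enough to check that the top entry is invalid */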
+ is_free = (((job_queue >> 24) & 0xFF) == JOBQUEUE_JOB_INVALID);
+
+ return is_free;
+}
+
+/**
+ * Check whether the job queue is empty or not.
+ * @return True if the job queue is empty, false otherwise.
+ */
+static bool jobqueue_is_empty(void)
+{
+ tCS *regs;
+ uint32_t job_queue;
+
+ regs = mver_reg_get_coresched_bank();
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+ mver_reg_put_coresched_bank(&regs);
+
+ return JOBQUEUE_EMPTY == job_queue;
+}
+
+/**
+ * Returns the number of times this LSID can be found in the HW job queue.
+ * @param lsid The lsid
+ * @return The number of times this LSID can be found in the HW job queue
+ */
+static int jobqueue_number_enqueues(enum LSID lsid)
+{
+ tCS *regs;
+ uint32_t job_queue;
+ int nr = 0;
+ int i;
+
+ regs = mver_reg_get_coresched_bank();
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+ mver_reg_put_coresched_bank(&regs);
+
+ for (i = 0; i < JOBQUEUE_SIZE; ++i)
+ {
+ if (((job_queue >> (i * CORESCHED_JOBQUEUE_ENTRY_SIZE)) & 0x0F) == lsid)
+ {
+ nr++;
+ }
+ }
+
+ return nr;
+}
+
+/**
+ * Enqueue the session to the HW job queue.
+ * @param session The session to enqueue.
+ * @return True if the session was enqueued to the HW job queue, false otherwise.
+ */
+static bool jobqueue_enqueue_session(struct mve_session_entry *session)
+{
+ tCS *regs;
+ enum LSID lsid;
+ uint32_t job_queue, ncores;
+ unsigned int i;
+ bool job_added = false;
+
+ lsid = session->lsid;
+ ncores = session->ncores;
+
+ regs = mver_reg_get_coresched_bank();
+ disable_scheduling(regs);
+
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+
+ for (i = 0; i < JOBQUEUE_SIZE; ++i)
+ {
+ if (((job_queue >> (i * CORESCHED_JOBQUEUE_ENTRY_SIZE)) & 0xFF) == JOBQUEUE_JOB_INVALID)
+ {
+ /* This job queue entry is unused! */
+ job_queue = (job_queue & (~(0xFF << (i * 8)))) | /* Clear all bits for this job entry */
+ (lsid << (i * 8)) | /* Add the LSID number */
+ ((ncores - 1) << (i * 8 + 4)); /* Add the number of cores to use */
+ job_added = true;
+ break;
+ }
+ }
+
+ if (false != job_added)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_DEBUG, "Enqueueing session. session=%p, lsid=%u, jobqueue=0x%08X, corelsid=0x%X.",
+ session->session_id, lsid, job_queue, mver_reg_read32(&regs->CORELSID));
+ mver_reg_write32(&regs->JOBQUEUE, job_queue);
+ }
+
+ enable_scheduling(regs);
+ mver_reg_put_coresched_bank(&regs);
+
+ return job_added;
+}
+
+/**
+ * Remove all occurrences of the LSID from the HW job queue. This function packs
+ * the remaining job queue entries in the lower bits of the JOBQUEUE register.
+ * This is needed since the MVE does not skip empty entries in the queue.
+ * Note that this function assumes scheduling has been disabled!
+ * @param regs Pointer to register mapping.
+ * @param lsid The LSID to dequeue.
+ */
+static void jobqueue_dequeue_lsid(tCS *regs, enum LSID lsid)
+{
+ uint32_t job_queue;
+ uint32_t new_job_queue = 0x0F0F0F0F;
+ int i;
+
+ job_queue = mver_reg_read32(&regs->JOBQUEUE);
+ for (i = 0; i < 4; ++i)
+ {
+ uint32_t job = job_queue >> 24;
+ if (lsid != (job & 0x0F))
+ {
+ new_job_queue = (new_job_queue << 8) | job;
+ }
+ job_queue <<= 8;
+ }
+ mver_reg_write32(&regs->JOBQUEUE, new_job_queue);
+}
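+
+/*
+ * Worked example: with JOBQUEUE = 0x0F020102, calling jobqueue_dequeue_lsid
+ * with lsid = 2 drops both entries holding LSID 2 and packs the surviving
+ * job towards bit 0, leaving 0x0F0F0F01 in the register.
+ */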
+
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+#include "mve_rsrc_pm.h"
+
+static ssize_t sysfs_print_registered_sessions(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ struct mve_session_entry *entry = NULL;
+ struct list_head *pos;
+ int i = 0;
+
+ num += snprintf(buf, PAGE_SIZE, "Registered sessions\n");
+ list_for_each(pos, &sessions)
+ {
+ entry = container_of(pos, struct mve_session_entry, sessions_list);
+
+ num += snprintf(buf + num, PAGE_SIZE - num, "%d: ID %p\n", i, entry->session_id);
+ i++;
+ }
+
+ return num;
+}
+
+static ssize_t sysfs_print_queued_sessions(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ struct mve_session_entry *entry = NULL;
+ int i;
+
+ num += snprintf(buf, PAGE_SIZE, "Queued sessions\n");
+ mver_circular_buffer_for_each(i, entry, struct mve_session_entry *, queued_sessions)
+ {
+ num += snprintf(buf + num, PAGE_SIZE - num, "%d: ID %p\n", i, entry->session_id);
+ }
+
+ return num;
+}
+
+static ssize_t sysfs_print_running_sessions(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ struct mve_session_entry *entry = NULL;
+ int i = 0;
+
+ mver_pm_request_resume();
+
+ num += snprintf(buf, PAGE_SIZE, "Running sessions\n");
+ for (i = 0; i < mve_rsrc_data.nlsid; ++i)
+ {
+ entry = session_by_lsid[i];
+
+ if (NULL != entry)
+ {
+ num += snprintf(buf + num, PAGE_SIZE - num, "%d: ID %p State %d enqueues: %d running: %s\n", i,
+ entry->session_id, entry->has_work_callback(entry->session_id),
+ entry->enqueues,
+ session_is_executing(entry) ? "true" : "false");
+ }
+ else
+ {
+ num += snprintf(buf + num, PAGE_SIZE - num, "%d:\n", i);
+ }
+ }
+
+ mver_pm_request_suspend();
+
+ return num;
+}
+
+static ssize_t sysfs_print_hw_registers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+ tCS *regs;
+ int i;
+
+ mver_pm_request_resume();
+
+ num += snprintf(buf, PAGE_SIZE, "HW registers\n");
+
+ regs = mver_reg_get_coresched_bank();
+
+ num += snprintf(buf + num, PAGE_SIZE - num, "VERSION 0x%X\n", mver_reg_read32(&regs->VERSION));
+ num += snprintf(buf + num, PAGE_SIZE - num, "ENABLE 0x%X\n", mver_reg_read32(&regs->ENABLE));
+ num += snprintf(buf + num, PAGE_SIZE - num, "NCORES 0x%X\n", mver_reg_read32(&regs->NCORES));
+ num += snprintf(buf + num, PAGE_SIZE - num, "CORELSID 0x%X\n", mver_reg_read32(&regs->CORELSID));
+ num += snprintf(buf + num, PAGE_SIZE - num, "JOBQUEUE 0x%X\n", mver_reg_read32(&regs->JOBQUEUE));
+ num += snprintf(buf + num, PAGE_SIZE - num, "IRQVE 0x%X\n", mver_reg_read32(&regs->IRQVE));
+ num += snprintf(buf + num, PAGE_SIZE - num, "CLKFORCE 0x%X\n", mver_reg_read32(&regs->CLKFORCE));
+
+ for (i = 0; i < mve_rsrc_data.nlsid; ++i)
+ {
+ num += snprintf(buf + num, PAGE_SIZE - num, "\nLSID %d\n", i);
+ num += snprintf(buf + num, PAGE_SIZE - num, " CTRL 0x%X\n", mver_reg_read32(&regs->LSID[i].CTRL));
+ num += snprintf(buf + num, PAGE_SIZE - num, " MMU_CTRL 0x%X\n", mver_reg_read32(&regs->LSID[i].MMU_CTRL));
+ num += snprintf(buf + num, PAGE_SIZE - num, " NPROT 0x%X\n", mver_reg_read32(&regs->LSID[i].NPROT));
+ num += snprintf(buf + num, PAGE_SIZE - num, " ALLOC 0x%X\n", mver_reg_read32(&regs->LSID[i].ALLOC));
+ num += snprintf(buf + num, PAGE_SIZE - num, " SCHED 0x%X\n", mver_reg_read32(&regs->LSID[i].SCHED));
+ num += snprintf(buf + num, PAGE_SIZE - num, " TERMINATE 0x%X\n", mver_reg_read32(&regs->LSID[i].TERMINATE));
+ num += snprintf(buf + num, PAGE_SIZE - num, " IRQVE 0x%X\n", mver_reg_read32(&regs->LSID[i].IRQVE));
+ num += snprintf(buf + num, PAGE_SIZE - num, " IRQHOST 0x%X\n", mver_reg_read32(&regs->LSID[i].IRQHOST));
+ num += snprintf(buf + num, PAGE_SIZE - num, " STREAMID 0x%X\n", mver_reg_read32(&regs->LSID[i].STREAMID));
+ num += snprintf(buf + num, PAGE_SIZE - num, " BUSATTR[0] 0x%X\n", mver_reg_read32(&regs->LSID[i].BUSATTR[0]));
+ num += snprintf(buf + num, PAGE_SIZE - num, " BUSATTR[1] 0x%X\n", mver_reg_read32(&regs->LSID[i].BUSATTR[1]));
+ num += snprintf(buf + num, PAGE_SIZE - num, " BUSATTR[2] 0x%X\n", mver_reg_read32(&regs->LSID[i].BUSATTR[2]));
+ num += snprintf(buf + num, PAGE_SIZE - num, " BUSATTR[3] 0x%X\n", mver_reg_read32(&regs->LSID[i].BUSATTR[3]));
+ num += snprintf(buf + num, PAGE_SIZE - num, "\n");
+ }
+
+ mver_reg_put_coresched_bank(&regs);
+
+ mver_pm_request_suspend();
+
+ return num;
+}
+
+/**
+ * Backup the contents of the session RAM for a specified LSID.
+ * @param session_ram Buffer to store the session RAM in.
+ * @param regs Pointer to register mapping.
+ * @param lsid The LSID of the session to backup.
+ */
+static void session_ram_preserve(uint32_t *session_ram, tCS *regs, int lsid)
+{
+ uint32_t size_in_words = SESSIONRAM_SIZE_PER_LSID / sizeof(uint32_t);
+ uint32_t offset = lsid * size_in_words;
+ uint32_t i;
+
+ for (i = 0; i < size_in_words; i++)
+ {
+ session_ram[i] = mver_reg_read32(&regs->SESSIONRAM[offset + i]);
+ }
+}
+
+static ssize_t sysfs_dump_session_ram(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+
+ if (0 == list_empty(&sessions))
+ {
+ tCS *regs;
+        char *tmpBuf = MVE_RSRC_MEM_ZALLOC(PAGE_SIZE, GFP_KERNEL);
+
+        if (NULL == tmpBuf)
+        {
+            /* Failed to allocate the temporary buffer; nothing to dump */
+            return 0;
+        }
+
+        mver_pm_request_resume();
+
+ regs = mver_reg_get_coresched_bank();
+ session_ram_preserve((uint32_t *)tmpBuf, regs, 0);
+ mver_reg_put_coresched_bank(&regs);
+
+        /*
+         * Binary dump of PAGE_SIZE - 1 (4095) bytes of session RAM
+         */
+ for (num = 0; num < PAGE_SIZE - 1; num++)
+ {
+ *(buf + num) = tmpBuf[num];
+ }
+
+ mver_pm_request_suspend();
+
+ MVE_RSRC_MEM_FREE(tmpBuf);
+ }
+
+ return num;
+}
+
+#ifdef UNIT
+static ssize_t sysfs_print_cores(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t num = 0;
+
+ num += snprintf(buf, PAGE_SIZE, "%d\n", mve_rsrc_data.ncore);
+ return num;
+}
+
+static ssize_t sysfs_set_cores(struct device *dev, struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+ uint32_t num_cores;
+ uint32_t phys_cores;
+ int ret;
+ tCS *regs;
+
+ /* Convert string to int. */
+ ret = kstrtouint(buf, 10, &num_cores);
+
+ mver_pm_request_resume();
+
+ /* Read number of cores from register. */
+ regs = mver_reg_get_coresched_bank();
+ phys_cores = mver_reg_read32(&regs->NCORES);
+ mver_reg_put_coresched_bank(&regs);
+
+ mver_pm_request_suspend();
+
+    /* If the requested number of cores is invalid, fall back to the number of physical cores. */
+ if (0 != ret || 0 == num_cores || num_cores > phys_cores)
+ {
+ num_cores = phys_cores;
+ }
+
+ /* Remember core count. */
+ mve_rsrc_data.ncore = num_cores;
+
+ return count;
+}
+#endif
+
+static struct device_attribute sysfs_files[] =
+{
+ __ATTR(registered_sessions, S_IRUGO, sysfs_print_registered_sessions, NULL),
+ __ATTR(queued_sessions, S_IRUGO, sysfs_print_queued_sessions, NULL),
+ __ATTR(running_sessions, S_IRUGO, sysfs_print_running_sessions, NULL),
+ __ATTR(hw_regs, S_IRUGO, sysfs_print_hw_registers, NULL),
+ __ATTR(dump_session_ram, S_IRUGO, sysfs_dump_session_ram, NULL),
+#ifdef UNIT
+ __ATTR(num_cores, S_IRUGO | S_IWUSR, sysfs_print_cores, sysfs_set_cores),
+#endif
+};
+
+#endif /* CONFIG_SYSFS && _DEBUG */
+
+/**
+ * Get session status for DVFS purposes.
+ *
+ * This function is used by the DVFS module to retrieve the status of a session.
+ * DVFS uses this information to control the clock frequency of the MVE cores.
+ *
+ * @param session_id Session id.
+ * @param status Pointer to a structure that receives the status.
+ * @return True when the session status was retrieved successfully,
+ *         false otherwise.
+ */
+static bool get_session_status(mver_session_id session_id,
+ struct mver_dvfs_session_status *status)
+{
+ bool res = false;
+ struct mve_session_entry *entry;
+ int sem_taken;
+ session_get_restricting_buffer_count_fptr get_restricting_buffer_count_callback = NULL;
+
+ sem_taken = down_interruptible(&scheduler_sem);
+
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry)
+ {
+ get_restricting_buffer_count_callback = entry->get_restricting_buffer_count_callback;
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
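+    /* The callback pointer was captured under the scheduler semaphore above;
+     * it is invoked only after the semaphore has been released, so the
+     * session is never called back with the lock held. */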
+ if (NULL != get_restricting_buffer_count_callback)
+ {
+ status->restricting_buffer_count = get_restricting_buffer_count_callback(session_id);
+ if (status->restricting_buffer_count >= 0)
+ {
+ res = true;
+ }
+ }
+
+ return res;
+}
+
+void mver_scheduler_init(struct device *dev)
+{
+ INIT_LIST_HEAD(&sessions);
+ sema_init(&scheduler_sem, 1);
+ queued_sessions = mver_circular_buffer_create(20);
+
+ mver_dvfs_init(dev, get_session_status);
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ {
+ int err;
+ int i;
+
+ for (i = 0; i < NELEMS(sysfs_files); ++i)
+ {
+ err = device_create_file(dev, &sysfs_files[i]);
+ if (err < 0)
+ {
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_ERROR, "Unable to create sysfs file.");
+ }
+ }
+ }
+#endif
+}
+
+void mver_scheduler_deinit(struct device *dev)
+{
+ int sem_taken;
+
+ mver_dvfs_deinit(dev);
+ /* Destroy queued sessions. */
+ sem_taken = down_interruptible(&scheduler_sem);
+
+ mver_circular_buffer_destroy(queued_sessions);
+ queued_sessions = NULL;
+
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
+#ifndef EMULATOR
+#if defined(CONFIG_SYSFS) && defined(_DEBUG)
+ {
+ int i;
+
+ for (i = 0; i < NELEMS(sysfs_files); ++i)
+ {
+ device_remove_file(dev, &sysfs_files[i]);
+ }
+ }
+#endif
+#endif
+}
+
+/**
+ * Switch out a running session and unmap the LSID. The session is notified
+ * that it has been switched out.
+ * @param session The session to switch out.
+ */
+static void switch_out_session(struct mve_session_entry *session)
+{
+ tCS *regs;
+ enum LSID lsid = session->lsid;
+ uint32_t val;
+
+ regs = mver_reg_get_coresched_bank();
+
+ val = mver_reg_read32(&regs->LSID[lsid].ALLOC);
+ WARN_ON(val == 0);
+ if (0 != val)
+ {
+ int sem_taken;
+ uint32_t ncore;
+ uint32_t core;
+ uint32_t corelsid_mask;
+ int ntries = WAIT_FOR_REBOOT_NTRIES;
+ enum SCHEDULE_STATE state;
+
+        /* Deallocate the LSID using the MVE register interface */
+        /* Remove all occurrences of this session's LSID from the job queues */
+ disable_scheduling(regs);
+ jobqueue_dequeue_lsid(regs, lsid);
+ enable_scheduling(regs);
+
+ /* Disable job scheduling for this LSID */
+ mver_reg_write32(&regs->LSID[lsid].SCHED, 0);
+
+ ncore = mver_reg_read32(&regs->NCORES);
+
+ /* Make sure all firmware memory writes have completed */
+ do
+ {
+ /* Get the current mask of which core is running what */
+ corelsid_mask = mver_reg_read32(&regs->CORELSID);
+
+ /* Iterate over all cores in the mask and check if they
+ * are running our lsid. */
+ for (core = 0; core < ncore; ++core)
+ {
+ if ((corelsid_mask & 0xF) == lsid)
+ {
+ /* Our lsid is still running */
+ break;
+ }
+ /* Shift to next core */
+ corelsid_mask = corelsid_mask >> 4;
+ }
+ }
+ while (core != ncore && --ntries > 0);
+
+ WARN_ON(ntries == 0);
+
+ mver_reg_write32(&regs->LSID[lsid].ALLOC, 0);
+ session_by_lsid[session->lsid] = NULL;
+ session->lsid = NO_LSID;
+
+        /* The session's scheduled_sem must be taken here or the session
+ * will think it is scheduled while in fact it is not. */
+ sem_taken = down_interruptible(&session->scheduled_sem);
+
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_DEBUG, "Switching out session. session=%p, lsid=%u, corelsid=0x%X.",
+ session->session_id, lsid, mver_reg_read32(&regs->CORELSID));
+
+ mver_reg_put_coresched_bank(&regs);
+ session->switchout_complete_callback(session->session_id);
+
+ state = session->has_work_callback(session->session_id);
+ if (SCHEDULE_STATE_SLEEP != state && 0 == session->enqueues)
+ {
+ /* The session has just been switched out but it has more work to do. */
+ mver_circular_buffer_add(queued_sessions, session);
+ session->enqueues++;
+ }
+
+ no_free_lsid = false;
+ }
+ else
+ {
+ mver_reg_put_coresched_bank(&regs);
+ }
+}
+
+/**
+ * Performs all HW interaction required to switch in a session, except
+ * enqueuing an entry in the hardware job queue.
+ * @param session The session to switch in.
+ * @param lsid Logical session ID to use for the session.
+ * @return True if the session was mapped, false otherwise.
+ */
+static bool map_session(struct mve_session_entry *session, enum LSID lsid)
+{
+ tCS *regs;
+ uint32_t val;
+ int ntries;
+ bool ret = true;
+ uint32_t disallow_cores = 0;
+ int ncores;
+
+ session->lsid = lsid;
+ session_by_lsid[lsid] = session;
+
+ regs = mver_reg_get_coresched_bank();
+
+ WARN_ON(mver_reg_read32(&regs->LSID[lsid].ALLOC) != 0);
+
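+    /* Allocate the LSID: writing 1 requests a normal session, 2 a protected
+     * (secure) session. */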
+ if (false == session->secure)
+ {
+ mver_reg_write32(&regs->LSID[lsid].ALLOC, 1);
+ }
+ else
+ {
+ mver_reg_write32(&regs->LSID[lsid].ALLOC, 2);
+ }
+
+ /* Mali-V550: the session RAM is cleared when the ALLOC register is written.
+ * Poll the TERMINATE register to find out when the RAM has been cleared. */
+ mver_reg_write32(&regs->LSID[lsid].TERMINATE, 1);
+ ntries = WAIT_FOR_TERMINATE_NTRIES;
+ do
+ {
+ val = mver_reg_read32(&regs->LSID[lsid].TERMINATE);
+ }
+ while (0 != val && --ntries);
+
+ if (0 != val)
+ {
+ /* Hardware doesn't respond */
+ WARN_ON(true);
+ mver_reg_write32(&regs->LSID[lsid].ALLOC, 0);
+ ret = false;
+ goto out;
+ }
+
+ /* Mask out cores that should not be used. */
+ ncores = mver_scheduler_get_ncores();
+ disallow_cores = (0xFFFFFFFF << ncores) & ((1 << CORESCHED_LSID_CTRL_DISALLOW_SZ) - 1);
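+    /* Example: with ncores == 2 and an 8 bit DISALLOW field this evaluates
+     * to 0xFC, i.e. cores 2-7 may not run this session. */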
+
+ /* Write address of the L1 page */
+ mver_reg_write32(&regs->LSID[lsid].MMU_CTRL, session->mmu_ctrl);
+ mver_reg_write32(&regs->LSID[lsid].CTRL, disallow_cores | (session->ncores << CORESCHED_LSID_CTRL_MAXCORES));
+
+ mver_reg_write32(&regs->LSID[lsid].BUSATTR[0], mve_rsrc_data.port_attributes[0]);
+ mver_reg_write32(&regs->LSID[lsid].BUSATTR[1], mve_rsrc_data.port_attributes[1]);
+ mver_reg_write32(&regs->LSID[lsid].BUSATTR[2], mve_rsrc_data.port_attributes[2]);
+ mver_reg_write32(&regs->LSID[lsid].BUSATTR[3], mve_rsrc_data.port_attributes[3]);
+
+ mver_reg_write32(&regs->LSID[lsid].FLUSH_ALL, 0);
+
+ /* Clear all interrupts */
+ mver_reg_write32(&regs->LSID[lsid].IRQVE, 0);
+ mver_reg_write32(&regs->LSID[lsid].IRQHOST, 0);
+ wmb();
+ mver_reg_write32(&regs->LSID[lsid].SCHED, 1);
+
+out:
+ mver_reg_put_coresched_bank(&regs);
+ return ret;
+}
+
+/**
+ * Returns a free LSID.
+ * @return Free LSID
+ */
+static enum LSID find_free_lsid(void)
+{
+ enum LSID lsid = NO_LSID;
+ int i;
+
+ for (i = 0; i < mve_rsrc_data.nlsid; ++i)
+ {
+ if (NULL == session_by_lsid[i])
+ {
+ lsid = i;
+ break;
+ }
+ }
+
+ return lsid;
+}
+
+/**
+ * Performs everything required to switch in a session. The session is
+ * notified that it's about to be switched in.
+ * @param session The session to switch in.
+ * @return True if this session was scheduled, false if not.
+ */
+static bool switch_in_session(struct mve_session_entry *session)
+{
+ bool queue_free;
+ enum LSID free_lsid;
+ int nr_enqueues;
+ bool res;
+
+ if (false == scheduling_enabled)
+ {
+ return false;
+ }
+
+ queue_free = jobqueue_is_free();
+ free_lsid = find_free_lsid();
+
+ if (false == queue_free)
+ {
+ return false;
+ }
+
+ if (NO_LSID == session->lsid)
+ {
+ if (NO_LSID == free_lsid)
+ {
+ no_free_lsid = true;
+ return false;
+ }
+
+ res = map_session(session, free_lsid);
+ if (false == res)
+ {
+ /* Failed to map session. Try again later */
+ return false;
+ }
+ /* Notify that this session has been scheduled */
+ up(&session->scheduled_sem);
+ }
+
+ nr_enqueues = jobqueue_number_enqueues(session->lsid);
+ if (MAX_SESSION_INSTANCES_IN_HW_QUEUE <= nr_enqueues)
+ {
+ /* Don't let any session occupy more than MAX_SESSION_INSTANCES_IN_HW_QUEUE entries in the HW queue */
+ return false;
+ }
+
+ /* Notify the session that it has been switched in */
+ session->switchin_callback(session->session_id);
+
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_DEBUG, "Switching in session. session=%p, lsid=%u, enqueued=%u.", session->session_id, session->lsid, nr_enqueues);
+
+ res = jobqueue_enqueue_session(session);
+ WARN_ON(false == res);
+
+ return true;
+}
+
+/**
+ * Switch in as many pending sessions as possible.
+ */
+static void switch_in_pending_sessions(void)
+{
+ struct mve_session_entry *session;
+ uint32_t num;
+ bool res;
+ bool queue_empty;
+
+ num = mver_circular_buffer_get_num_entries(queued_sessions);
+ if (0 == num)
+ {
+ /* No pending sessions */
+ return;
+ }
+
+ do
+ {
+ mver_circular_buffer_peek(queued_sessions, (void **)&session);
+ res = switch_in_session(session);
+ if (false != res)
+ {
+ mver_circular_buffer_remove(queued_sessions, (void **)&session);
+ session->enqueues--;
+ mver_irq_signal_mve(session->session_id);
+ }
+ }
+ while (false != res && --num > 0);
+
+ /* Evict the currently executing session if it's idle */
+ queue_empty = jobqueue_is_empty();
+ if (queue_empty == false)
+ {
+ enum LSID lsid;
+
+ lsid = jobqueue_currently_executing();
+ if (lsid != NO_LSID)
+ {
+ enum SCHEDULE_STATE state;
+ struct mve_session_entry *entry;
+
+ entry = session_by_lsid[lsid];
+ state = entry->has_work_callback(entry->session_id);
+ if (SCHEDULE_STATE_IDLE == state)
+ {
+ entry->switchout_callback(entry->session_id, true);
+ }
+ }
+ }
+}
+
+/**
+ * Switch out sessions that are no longer executing and unmap their LSIDs
+ */
+static void switch_out_pending_sessions(void)
+{
+ enum LSID lsid;
+
+ for (lsid = LSID_0; lsid < NELEMS(session_by_lsid); ++lsid)
+ {
+ struct mve_session_entry *entry = session_by_lsid[lsid];
+ if (NULL != entry)
+ {
+ bool res;
+
+ res = session_is_executing(entry);
+ if (false == res)
+ {
+ switch_out_session(entry);
+ }
+ }
+ }
+
+ switch_in_pending_sessions();
+}
+
+/**
+ * Stop a running session and attempt to start a pending session
+ * on the same LSID as the stopped session. If the session is not running,
+ * this function doesn't do anything.
+ * @param session The session to stop.
+ */
+static void stop_session(struct mve_session_entry *session)
+{
+ enum LSID lsid = session->lsid;
+
+ if (NO_LSID != lsid)
+ {
+ tCS *regs;
+ int ignore_me;
+
+ regs = mver_reg_get_coresched_bank();
+ if (0 != mver_reg_read32(&regs->LSID[lsid].ALLOC))
+ {
+ uint32_t val;
+ int ntries;
+
+            /* Remove all occurrences of this session's LSID from the job queues */
+ disable_scheduling(regs);
+ jobqueue_dequeue_lsid(regs, lsid);
+ enable_scheduling(regs);
+
+ mver_reg_write32(&regs->LSID[lsid].SCHED, 0);
+
+ mver_reg_write32(&regs->LSID[lsid].TERMINATE, 1);
+ ntries = WAIT_FOR_TERMINATE_NTRIES;
+ do
+ {
+ val = mver_reg_read32(&regs->LSID[lsid].TERMINATE);
+ }
+ while (0 != val && --ntries);
+ /* Print a warning if hardware was unresponsive */
+ WARN_ON(0 != val);
+
+ mver_reg_write32(&regs->LSID[lsid].ALLOC, 0);
+
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_DEBUG, "Session stopped. Switching out session. session=%p.", session->session_id);
+ }
+
+ session_by_lsid[lsid] = NULL;
+ session->lsid = NO_LSID;
+
+ mver_reg_put_coresched_bank(&regs);
+
+ /* This session is no longer running */
+ ignore_me = down_interruptible(&session->scheduled_sem);
+
+ /* Switch in pending sessions */
+ switch_in_pending_sessions();
+ }
+}
+
+bool mver_scheduler_register_session(mver_session_id session_id,
+ uint32_t mmu_ctrl,
+ int ncores,
+ bool secure,
+ irq_callback_fptr irq_callback,
+ session_has_work_fptr has_work_callback,
+ session_switchout_fptr switchout_callback,
+ session_switchin_fptr switchin_callback,
+ session_switchout_completed_fptr switchout_complete_callback,
+ session_get_restricting_buffer_count_fptr get_restricting_buffer_count_callback)
+{
+ struct mve_session_entry *entry;
+ int sem_taken;
+
+ entry = MVE_RSRC_MEM_ZALLOC(sizeof(struct mve_session_entry), GFP_KERNEL);
+ if (NULL == entry)
+ {
+ return false;
+ }
+
+ entry->session_id = session_id;
+ entry->lsid = NO_LSID;
+ entry->mmu_ctrl = mmu_ctrl;
+ entry->ncores = ncores;
+ entry->secure = secure;
+ entry->enqueues = 0;
+ entry->irq_callback = irq_callback;
+ entry->has_work_callback = has_work_callback;
+ entry->switchout_callback = switchout_callback;
+ entry->switchin_callback = switchin_callback;
+ entry->switchout_complete_callback = switchout_complete_callback;
+ entry->get_restricting_buffer_count_callback = get_restricting_buffer_count_callback;
+
+ INIT_LIST_HEAD(&entry->sessions_list);
+
+ /* Initialize the semaphore to represent an unscheduled session */
+ sema_init(&entry->scheduled_sem, 0);
+
+ sem_taken = down_interruptible(&scheduler_sem);
+ if (0 != sem_taken)
+ {
+ goto error;
+ }
+
+ mver_dvfs_register_session(session_id);
+
+ list_add(&entry->sessions_list, &sessions);
+ up(&scheduler_sem);
+
+ return true;
+
+error:
+ if (NULL != entry)
+ {
+ MVE_RSRC_MEM_FREE(entry);
+ }
+
+ return false;
+}
+
+void mver_scheduler_unregister_session(mver_session_id session_id)
+{
+ struct mve_session_entry *entry;
+ int sem_taken;
+
+ sem_taken = down_interruptible(&scheduler_sem);
+    /* Continue even if the semaphore couldn't be acquired */
+
+ mver_dvfs_unregister_session(session_id);
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry)
+ {
+ list_del(&entry->sessions_list);
+ MVE_RSRC_MEM_FREE(entry);
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+}
+
+/**
+ * Process IRQ for a session.
+ * @param entry The session that has received an IRQ.
+ */
+static void process_session_irq(struct mve_session_entry *entry)
+{
+ enum SCHEDULE_STATE state = SCHEDULE_STATE_IDLE;
+
+ /* Process IRQ */
+ entry->irq_callback(entry->session_id);
+ /* Does the session have any work to perform? */
+ state = entry->has_work_callback(entry->session_id);
+
+ if (SCHEDULE_STATE_IDLE == state)
+ {
+#if SCHEDULER_MODE_IDLE_SWITCHOUT == 1
+ /* Switch out all sessions that report idleness. This is done to allow
+ * the HW to power gate. */
+ entry->switchout_callback(entry->session_id, true);
+#else
+ /* The session is idle and should be switched out if any other
+ * session is enqueued */
+ bool queue_empty = jobqueue_is_empty();
+ if (!queue_empty)
+ {
+ /* There are items in the job queue. Switch out this session! */
+ entry->switchout_callback(entry->session_id, true);
+ }
+#endif
+ }
+ else if (SCHEDULE_STATE_RESCHEDULE == state && 0 == entry->enqueues)
+ {
+ bool is_executing = jobqueue_is_enqueued(entry);
+ if (false == is_executing)
+ {
+ /* Session has more work to do. Schedule it */
+ mver_circular_buffer_add(queued_sessions, entry);
+ entry->enqueues++;
+ }
+ }
+ else if (SCHEDULE_STATE_REQUEST_SWITCHOUT == state)
+ {
+ entry->switchout_callback(entry->session_id, true);
+ }
+}
+
+bool mver_scheduler_execute(mver_session_id session_id)
+{
+ struct mve_session_entry *entry;
+ int sem_taken;
+ bool ret = false;
+
+ sem_taken = down_interruptible(&scheduler_sem);
+
+ if (NULL == queued_sessions)
+ {
+ goto out;
+ }
+
+ if (false == scheduling_enabled)
+ {
+ /* Scheduling has been disabled. Just add the session to the queue
+ * and it'll execute once scheduling is enabled. */
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry && 0 == entry->enqueues)
+ {
+ mver_circular_buffer_add(queued_sessions, entry);
+ entry->enqueues++;
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
+ return false;
+ }
+
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry)
+ {
+ /* Make sure all IRQs for the session have been processed */
+ process_session_irq(entry);
+
+ if (0 == entry->enqueues &&
+ (NO_LSID == entry->lsid || false == session_is_executing(entry)))
+ {
+ mver_circular_buffer_add(queued_sessions, entry);
+ entry->enqueues++;
+ }
+
+ if (false != no_free_lsid)
+ {
+ /* No free LSID. Try to free a LSID that is not used */
+ switch_out_pending_sessions();
+ }
+ else
+ {
+ /* Schedule the next session in line */
+ switch_in_pending_sessions();
+ }
+
+ if (NO_LSID == entry->lsid)
+ {
+ /* This session wasn't switched in. Most likely due to
+ * too many concurrent sessions. */
+ int res;
+
+ switch_out_pending_sessions();
+
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
+ /* Don't return just yet. Wait on the scheduled semaphore
+ * with a timeout. This will reduce the number of user<->kernel
+ * space transitions. */
+ res = down_timeout(&entry->scheduled_sem, msecs_to_jiffies(UNSCHEDULED_WAIT_MS));
+ if (0 != res)
+ {
+ /* Failed to schedule the session within the time limit. */
+ ret = false;
+ }
+ else
+ {
+ /* Release the semaphore immediately since we only
+ * use it to detect whether the session has been
+ * scheduled or not. */
+ up(&entry->scheduled_sem);
+ mver_irq_signal_mve(session_id);
+ ret = true;
+ }
+
+ return ret;
+ }
+ else
+ {
+ mver_irq_signal_mve(session_id);
+ ret = true;
+ }
+ }
+
+out:
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
+ return ret;
+}
+
+void mver_scheduler_stop(mver_session_id session_id)
+{
+ struct mve_session_entry *entry;
+ int sem_taken;
+
+ sem_taken = down_interruptible(&scheduler_sem);
+
+ if (NULL == queued_sessions)
+ {
+ goto out;
+ }
+
+ /* Continue even in the case the semaphore wasn't acquired */
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry)
+ {
+ mver_circular_buffer_remove_all_occurences(queued_sessions, entry);
+ stop_session(entry);
+ }
+
+out:
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+}
+
+void mver_scheduler_handle_irq(int lsid)
+{
+ struct mve_session_entry *entry;
+ int sem_taken;
+
+ if (lsid < 0 || lsid >= (int)NELEMS(session_by_lsid))
+ {
+ return;
+ }
+
+ sem_taken = down_interruptible(&scheduler_sem);
+
+ if (NULL == queued_sessions)
+ {
+ goto out;
+ }
+
+ /* Continue even if the semaphore wasn't acquired */
+ entry = session_by_lsid[lsid];
+
+ /* entry may be NULL if the session is stopped before an interrupt
+ * connected to the session is processed. This is not an error, just
+ * skip processing the interrupt. */
+ if (NULL != entry)
+ {
+ WARN_ON(lsid != entry->lsid);
+ process_session_irq(entry);
+ }
+
+ if (false != no_free_lsid)
+ {
+        /* A session was previously prevented from scheduling because no
+         * LSID was free. Unmap all sessions that no longer execute
+         * on the HW. */
+ switch_out_pending_sessions();
+ }
+ else
+ {
+ switch_in_pending_sessions();
+ }
+
+out:
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+}
+
+void mver_scheduler_flush_tlb(mver_session_id session_id)
+{
+ struct mve_session_entry *entry;
+
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry)
+ {
+ enum LSID lsid = entry->lsid;
+
+ if (NO_LSID != lsid)
+ {
+ tCS *regs = mver_reg_get_coresched_bank();
+ mver_reg_write32(&regs->LSID[lsid].FLUSH_ALL, 0);
+ mver_reg_put_coresched_bank(&regs);
+ }
+ }
+}
+
+enum LSID mver_scheduler_get_session_lsid(mver_session_id session_id)
+{
+ struct mve_session_entry *entry;
+ enum LSID lsid = NO_LSID;
+
+ entry = get_session_cache_entry(session_id);
+ if (NULL != entry)
+ {
+ lsid = entry->lsid;
+ }
+
+ return lsid;
+}
+
+void mver_scheduler_suspend(void)
+{
+ int sem_taken;
+ int i;
+ bool done = false;
+
+ sem_taken = down_interruptible(&scheduler_sem);
+ scheduling_enabled = false;
+
+ /* Instruct all running sessions to switch out */
+ for (i = 0; i < MAX_LSID; ++i)
+ {
+ struct mve_session_entry *entry;
+
+ entry = session_by_lsid[i];
+ if (NULL != entry)
+ {
+ entry->switchout_callback(entry->session_id, false);
+ }
+ }
+
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
+ /* Wait until all sessions have switched out */
+ while (false == done)
+ {
+ done = true;
+
+ sem_taken = down_interruptible(&scheduler_sem);
+ /* Verify that no sessions are running */
+ for (i = 0; i < mve_rsrc_data.nlsid; ++i)
+ {
+ if (NULL != session_by_lsid[i])
+ {
+ /* This session has not yet switched out. When it no longer
+ * executes, force it to switch out */
+ bool res = session_is_executing(session_by_lsid[i]);
+ if (false == res)
+ {
+ switch_out_session(session_by_lsid[i]);
+ }
+
+ done = false;
+ break;
+ }
+ }
+ if (0 == sem_taken)
+ {
+ up(&scheduler_sem);
+ }
+
+ if (false == done)
+ {
+ msleep(100);
+ }
+ }
+
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_INFO, "All sessions suspended.");
+}
+
+void mver_scheduler_resume(void)
+{
+ /* No need to lock any mutexes now since userspace is frozen */
+ scheduling_enabled = true;
+ no_free_lsid = false;
+
+ /* Resume sessions */
+ switch_in_pending_sessions();
+
+ MVE_LOG_PRINT(&mve_rsrc_log_scheduler, MVE_LOG_INFO, "Sessions resumed.");
+}
+
+int mver_scheduler_get_ncores(void)
+{
+#ifdef EMULATOR
+ return 1;
+#else
+ return mve_rsrc_data.ncore;
+#endif
+}
+
+EXPORT_SYMBOL(mver_scheduler_register_session);
+EXPORT_SYMBOL(mver_scheduler_unregister_session);
+EXPORT_SYMBOL(mver_scheduler_execute);
+EXPORT_SYMBOL(mver_scheduler_stop);
+EXPORT_SYMBOL(mver_scheduler_flush_tlb);
+EXPORT_SYMBOL(mver_scheduler_get_ncores);
diff --git a/drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.h b/drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.h
new file mode 100644
index 000000000000..2534b7fe14f6
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/mve_rsrc_scheduler.h
@@ -0,0 +1,169 @@
+/*
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef MVE_RSRC_SCHEDULER_H
+#define MVE_RSRC_SCHEDULER_H
+
+#ifdef EMULATOR
+#include "emulator_userspace.h"
+#else
+#include <linux/types.h>
+#include <linux/device.h>
+#endif
+
+/* Defines the type used by the scheduler to identify sessions */
+typedef void *mver_session_id;
+
+/* Logical Session ID enumeration */
+enum LSID {NO_LSID = -1, LSID_0, LSID_1, LSID_2, LSID_3};
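+/* NO_LSID denotes a session that is currently not mapped onto the hardware */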
+
+/**
+ * When a session has received and processed an IRQ, the scheduler queries the
+ * session whether it has any remaining work to do. The session can return any of
+ * the following constants to describe its current work state.
+ */
+enum SCHEDULE_STATE
+{
+ SCHEDULE_STATE_BUSY, /**< The session is currently executing and has work
+ * to do. E.g. processing frames or the session is
+ * waiting for the response of a get/set message. */
+ SCHEDULE_STATE_IDLE, /**< Session is switched in but does not have anything to do */
+ SCHEDULE_STATE_RESCHEDULE, /**< Session is switched out but needs to be rescheduled */
+ SCHEDULE_STATE_SLEEP, /**< Session is switched out and does not have anything to do */
+ SCHEDULE_STATE_REQUEST_SWITCHOUT, /**< Session requests to be switched out. This state should
+ * be used with care. Instead rely on the scheduler to decide
+ * when a session should be switched out. */
+};
+
+/* Session callback functions */
+/* Called when an IRQ is received */
+typedef void (*irq_callback_fptr)(mver_session_id session_id);
+/* Called when the scheduler wants to query whether the session can be switched out */
+typedef enum SCHEDULE_STATE (*session_has_work_fptr)(mver_session_id session_id);
+/* Notify the session to switch out */
+typedef void (*session_switchout_fptr)(mver_session_id session_id, bool require_idleness);
+/* Notify the session that it's about to be switched in */
+typedef void (*session_switchin_fptr)(mver_session_id session_id);
+/* Notify the session that it has been switched out */
+typedef void (*session_switchout_completed_fptr)(mver_session_id session_id);
+/* Retrieve the number of restricting buffers for the session */
+typedef int (*session_get_restricting_buffer_count_fptr)(mver_session_id session_id);
+
+/**
+ * Initialize the scheduler module. Must be invoked before any of the
+ * other functions in this module may be called.
+ * @param dev Device parameter received from the kernel.
+ */
+void mver_scheduler_init(struct device *dev);
+
+/**
+ * Deinitialize the scheduler module. Must be invoked before the driver is
+ * uninstalled.
+ * @param dev Device parameter received from the kernel.
+ */
+void mver_scheduler_deinit(struct device *dev);
+
+/**
+ * Register a session with the scheduler. Call mver_scheduler_execute to notify
+ * the scheduler that the session has work to perform.
+ * @param session_id Session identifier.
+ * @param mmu_ctrl L0 page entry.
+ * @param ncores Number of cores to schedule.
+ * @param secure Session is secure.
+ * @param irq_callback Callback function that is invoked when an interrupt
+ * has been received for this session.
+ * @param has_work_callback Callback function that is invoked when the scheduler
+ *                          wants to know the session's execution state.
+ * @param switchout_callback Callback function that is invoked when the scheduler
+ * wants the session to prepare for switch out.
+ * @param switchin_callback Callback function that is invoked when the scheduler
+ * wants to notify the session that it has been scheduled.
+ * @param switchout_complete_callback Callback function that is invoked when the scheduler
+ * wants to notify the session that the switchout is complete.
+ * @param get_restricting_buffer_count_callback Callback function that is invoked when the scheduler
+ *                                   wants to retrieve the number of restricting buffers enqueued to the FW.
+ * @return True if the session was successfully registered with the scheduler,
+ * false otherwise.
+ */
+bool mver_scheduler_register_session(mver_session_id session_id,
+ uint32_t mmu_ctrl,
+ int ncores,
+ bool secure,
+ irq_callback_fptr irq_callback,
+ session_has_work_fptr has_work_callback,
+ session_switchout_fptr switchout_callback,
+ session_switchin_fptr switchin_callback,
+ session_switchout_completed_fptr switchout_complete_callback,
+                                     session_get_restricting_buffer_count_fptr get_restricting_buffer_count_callback);
+
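+/*
+ * Illustrative registration sketch (the my_* callbacks, session and l0_page
+ * are hypothetical examples, not part of this interface):
+ *
+ *   bool ok = mver_scheduler_register_session(session, l0_page, 2, false,
+ *                                             my_irq_cb, my_has_work_cb,
+ *                                             my_switchout_cb, my_switchin_cb,
+ *                                             my_switchout_done_cb,
+ *                                             my_restricting_buf_count_cb);
+ *   if (false != ok)
+ *   {
+ *       mver_scheduler_execute(session);
+ *   }
+ */
+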
+/**
+ * Unregister a session with the scheduler.
+ * @param session_id Session identifier of the session to unregister.
+ */
+void mver_scheduler_unregister_session(mver_session_id session_id);
+
+/**
+ * Notify the scheduler that the supplied session has work to perform
+ * and wants to be scheduled for execution on the MVE.
+ * @param session_id Session identifier.
+ * @return True if the session was scheduled for immediate execution. False
+ * if the session is waiting to be scheduled.
+ */
+bool mver_scheduler_execute(mver_session_id session_id);
+
+/**
+ * Stop session for scheduling.
+ * @param session_id Session identifier.
+ */
+void mver_scheduler_stop(mver_session_id session_id);
+
+/**
+ * Called when an interrupt has been received for a LSID.
+ * @param lsid LSID of the session that generated the interrupt.
+ */
+void mver_scheduler_handle_irq(int lsid);
+
+/**
+ * Call this function to invalidate all cached page mappings in the TLB. This
+ * function must be called when pages have been removed from the page table.
+ * If the supplied session is not scheduled, nothing is done.
+ * @param session_id Session identifier of the session that shall have its TLB flushed.
+ */
+void mver_scheduler_flush_tlb(mver_session_id session_id);
+
+/**
+ * Returns the LSID of the supplied session.
+ * @param session_id Session identifier.
+ * @return The LSID of the session if the session is mapped, NO_LSID if the
+ * session is unmapped.
+ */
+enum LSID mver_scheduler_get_session_lsid(mver_session_id session_id);
+
+/**
+ * Suspend all running sessions and make sure that no new sessions are scheduled.
+ * Call mver_scheduler_resume to resume scheduling.
+ */
+void mver_scheduler_suspend(void);
+
+/**
+ * Resume session scheduling.
+ */
+void mver_scheduler_resume(void);
+
+/**
+ * Return the number of supported cores.
+ */
+int mver_scheduler_get_ncores(void);
+
+#endif /* MVE_RSRC_SCHEDULER_H */
diff --git a/drivers/video/arm/v5xx/resource/sconscript b/drivers/video/arm/v5xx/resource/sconscript
new file mode 100644
index 000000000000..599556dab030
--- /dev/null
+++ b/drivers/video/arm/v5xx/resource/sconscript
@@ -0,0 +1,48 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+import os
+
+Import( 'driver_env' )
+myEnv = driver_env.Clone()
+
+if( (myEnv['arch'] == 'x86_32') or (myEnv['arch'] == 'x86_64') ):
+ myEnv.Append( CPPPATH = ['#/kernel/drivers/video/arm/v5xx/resource/',
+ '#/kernel/drivers/video/arm/v5xx/external/',
+ '#/emul/emul/emul/',
+ '#/emul/',
+ '#/fw/include',
+ '#kernel/drivers/video/arm/v5xx/resource/machine'])
+
+ myEnv.Append(CPPDEFINES=['MVE_LOG_ALOG_ENABLE'])
+
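+    # SCHEDULER_MODE_IDLE_SWITCHOUT=1 makes the scheduler switch out sessions
+    # that report idleness so the HW can power gate (see process_session_irq
+    # in mve_rsrc_scheduler.c)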
+ if(myEnv['unit'] == '0'):
+ myEnv.Append(CPPDEFINES=['SCHEDULER_MODE_IDLE_SWITCHOUT=1'])
+ else:
+ myEnv.Append(CPPDEFINES=['SCHEDULER_MODE_IDLE_SWITCHOUT=0'])
+
+ if(myEnv['power_saving_mode'] == 'clock_gating'):
+ myEnv.Append(CPPDEFINES=['DISABLE_DVFS=1'])
+
+ sources = ['mve_rsrc_irq.c', 'mve_rsrc_register.c', 'mve_rsrc_scheduler.c', 'mve_rsrc_mem_frontend.c', 'mve_rsrc_pm.c',
+ 'mve_rsrc_circular_buffer.c', 'mve_rsrc_mem_cache.c', 'mve_rsrc_mem_dma.c', 'mve_rsrc_mem_dma_uncached.c', 'mve_rsrc_log.c',
+ 'mve_rsrc_dvfs.c']
+
+ sMVEDriverLib = myEnv.StaticLibrary(
+ os.path.join( "$STATIC_LIB_PATH" , "mve_rsrc" ),
+ sources
+ )
+
+ myEnv.LibTarget( "mve_rsrc", sMVEDriverLib )
diff --git a/drivers/video/arm/v5xx/sconscript b/drivers/video/arm/v5xx/sconscript
new file mode 100644
index 000000000000..854fd0231dcf
--- /dev/null
+++ b/drivers/video/arm/v5xx/sconscript
@@ -0,0 +1,111 @@
+#
+#
+# (C) COPYRIGHT ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+import os
+import subprocess
+
+Import('target_env')
+driver_env = target_env.Clone()
+Export('driver_env')
+
+if GetOption("clean"):
+ subprocess.call(['make clean'], shell=True)
+
+# need to invert the watchdog
+if driver_env['watchdog'] == '1':
+ driver_env['disable_watchdog'] = '0'
+else:
+ driver_env['disable_watchdog'] = '1'
+
+prints = ""
+if(driver_env['log_ftrace'] == '1'):
+ prints += "CONFIG_MALI_VPU_ENABLE_FTRACE=y "
+if(driver_env['log_print_file'] == '1'):
+ prints += "CONFIG_MALI_VPU_ENABLE_PRINT_FILE=y "
+
+driver_env.BuildRecursive()
+
+if os.path.isfile(os.path.normpath(os.environ['PWD']) + '/kernel/drivers/video/arm/v5xx/base/Makefile'):
+ if((driver_env['arch'] == 'arm_v7' or driver_env['arch'] == 'arm_v8') and os.environ.get('KDIR', '') != ''):
+ platform = ""
+ if (driver_env['hw'] == 'vex6'):
+ platform = "CONFIG_MALI_VPU_VEX6=y"
+ elif (driver_env['hw'] == 'vex7'):
+ platform = "CONFIG_MALI_VPU_VEX7=y"
+ elif (driver_env['hw'] == 'juno'):
+ platform = "CONFIG_MALI_VPU_JUNO=y"
+ else:
+ print "Unsupported platform"
+
+ if (driver_env['arch'] == 'arm_v7'):
+ arch = 'arm'
+ elif (driver_env['arch'] == 'arm_v8'):
+ arch = 'arm64'
+
+ watchdog = ""
+ if (driver_env['disable_watchdog'] == '1'):
+ watchdog = "CONFIG_MALI_VPU_DISABLE_WATCHDOG=y"
+
+ dvfs_sim = ""
+ if (driver_env['dvfs_sim'] == '1' and driver_env['power_saving_mode'] != 'clock_gating'):
+ dvfs_sim = "CONFIG_MALI_VPU_ENABLE_DVFS_SIM=y"
+
+ devicetree = ""
+ if (driver_env['device_tree'] == '1'):
+ devicetree = "CONFIG_MALI_VPU_DEVICE_TREE=y"
+
+ debug = ""
+ if (driver_env['debug'] == '1'):
+ debug = "CONFIG_MALI_VPU_DEBUG=y"
+
+ unit = ""
+ if (driver_env['unit'] == '1'):
+ unit = "CONFIG_MALI_VPU_UNIT=y"
+
+ trackmem = ""
+ if (driver_env['trackmem'] == '1' or driver_env['trackmem'] == '2'):
+ trackmem = "CONFIG_MALI_VPU_TRACKMEM=y"
+
+ resfail_mem = ""
+ if (driver_env['resfail_mem'] == '1'):
+ resfail_mem = "CONFIG_MALI_VPU_RESFAIL=y"
+
+ power_saving_mode = ""
+ if (driver_env['power_saving_mode'] == 'clock_gating'):
+ power_saving_mode = "CONFIG_MALI_VPU_POWER_SAVING_MODE_CLOCK_GATING=y"
+
+ if driver_env.isAndroid():
+ prints += "CONFIG_MALI_VPU_ENABLE_ALOG=y "
+
+ source_dir = 'kernel/drivers/video/arm/v5xx'
+ cmd = driver_env.Command('kernel', [],
+ 'cd ' + source_dir + '; ' +
+ 'CROSS_COMPILE=' + driver_env['tool_prefix'] +
+ ' ARCH=' + arch +
+ ' ' + debug +
+ ' ' + platform +
+ ' ' + devicetree +
+ ' ' + prints +
+ ' ' + watchdog +
+ ' ' + dvfs_sim +
+ ' ' + unit +
+ ' ' + power_saving_mode +
+ ' ' + trackmem +
+ ' ' + resfail_mem +
+ ' CONFIG_MALI_VPU=m' +
+ ' make -C ' + os.path.normpath(os.environ['KDIR']) +
+ ' M=' + os.path.normpath(os.environ['PWD']) + '/' + source_dir + '; cd ../../../../..; cp kernel/drivers/video/arm/v5xx/base/mve_base.ko ' + driver_env['progs_install']
+ + '; cp kernel/drivers/video/arm/v5xx/resource/mve_rsrc.ko ' + driver_env['progs_install'])
+ driver_env.AlwaysBuild(cmd)
diff --git a/linaro/configs/mali.conf b/linaro/configs/mali.conf
index e30168708aa6..65b5d2c6849a 100644
--- a/linaro/configs/mali.conf
+++ b/linaro/configs/mali.conf
@@ -12,3 +12,6 @@ CONFIG_SW_SYNC_USER=y
CONFIG_MALI_MIDGARD=y
CONFIG_MALI_PLATFORM_NAME="devicetree"
CONFIG_ION_JUNO=y
+CONFIG_MALI_VPU=y
+CONFIG_MALI_VPU_JUNO=y
+CONFIG_MALI_VPU_DEVICE_TREE=y