author     Linux Build Service Account <lnxbuild@localhost>  2018-07-31 09:05:50 -0700
committer  Linux Build Service Account <lnxbuild@localhost>  2018-07-31 09:05:50 -0700
commit     fcce33a9a5f54c797b184ff347e5eb86aee58d85 (patch)
tree       33c74b0018e399b14f3f0a358c1f5fd1bb63eb9e
parent     5b756f15d5dc646f5c877ba829ebfedffa777bfc (diff)
parent     9e9743e1286ab74c4ea0a8642a23fa0bab93c86a (diff)

Merge 9e9743e1286ab74c4ea0a8642a23fa0bab93c86a on remote branch LA.UM.6.8.r1-06700-SDM710.0
Change-Id: I9909b1ba4b49d16f98e66f0f4894ca82a12fb3d9
Diffstat:
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs605-lc.dtsi | 17
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs605.dtsi | 276
-rw-r--r--  arch/arm64/boot/dts/qcom/sda670-hdk.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm670-camera.dtsi | 17
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm670.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-camera.dtsi | 17
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi | 8
-rw-r--r--  arch/arm64/configs/sdm670-perf_defconfig | 2
-rw-r--r--  arch/arm64/configs/sdm670_defconfig | 2
-rw-r--r--  drivers/char/adsprpc.c | 56
-rw-r--r--  drivers/char/diag/diag_masks.c | 13
-rw-r--r--  drivers/gpu/drm/msm/sde_rsc_hw.c | 104
-rw-r--r--  drivers/gpu/msm/adreno.c | 23
-rw-r--r--  drivers/gpu/msm/adreno.h | 6
-rw-r--r--  drivers/gpu/msm/adreno_a6xx.c | 17
-rw-r--r--  drivers/gpu/msm/adreno_a6xx_preempt.c | 56
-rw-r--r--  drivers/gpu/msm/kgsl_gmu.c | 22
-rw-r--r--  drivers/gpu/msm/kgsl_gmu.h | 8
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 19
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c | 4
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h | 6
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/hfi.c | 46
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c | 227
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h | 19
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c | 375
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c | 56
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c | 244
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c | 4
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c | 3
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c | 4
-rw-r--r--  drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c | 14
-rw-r--r--  drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h | 16
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c | 1
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c | 14
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c | 1
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c | 5
-rw-r--r--  drivers/media/platform/msm/camera/cam_sync/cam_sync.c | 11
-rw-r--r--  drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c | 27
-rw-r--r--  drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c | 20
-rw-r--r--  drivers/soc/qcom/memshare/msm_memshare.c | 118
-rw-r--r--  drivers/soc/qcom/memshare/msm_memshare.h | 4
-rw-r--r--  include/uapi/media/cam_icp.h | 3
-rw-r--r--  kernel/time/timer.c | 6

46 files changed, 1419 insertions(+), 498 deletions(-)
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
index 240002624644..0101048b925f 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
@@ -144,53 +144,70 @@
&soc {
qcom,turing@8300000 {
/delete-property/ vdd_cx-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
};
qcom,lpass@62400000 {
/delete-property/ vdd_cx-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
};
};
&clock_cpucc {
/delete-property/ vdd_l3_mx_ao-supply;
/delete-property/ vdd_pwrcl_mx_ao-supply;
+ vdd_l3_mx_ao-supply = <&pm660_s2_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm660_s2_level_ao>;
};
&clock_gcc {
/delete-property/ vdd_cx-supply;
/delete-property/ vdd_cx_ao-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
+ vdd_cx_ao-supply = <&pm8005_s1_level_ao>;
};
&clock_videocc {
/delete-property/ vdd_cx-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
};
&clock_camcc {
/delete-property/ vdd_mx-supply;
/delete-property/ vdd_cx-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
+ vdd_mx-supply = <&pm660_s2_level>;
};
&clock_dispcc {
/delete-property/ vdd_cx-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
};
&clock_gpucc {
/delete-property/ vdd_mx-supply;
/delete-property/ vdd_cx-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
+ vdd_mx-supply = <&pm660_s2_level>;
};
&pil_modem {
/delete-property/ vdd_mx-supply;
/delete-property/ vdd_cx-supply;
/delete-property/ vdd_mss-supply;
+ vdd_cx-supply = <&pm8005_s1_level>;
+ vdd_mx-supply = <&pm660_s2_level>;
+ vdd_mss-supply = <&pm8005_s1_level>;
};
&clock_gfx {
/delete-property/ vdd_gfx-supply;
+ vdd_gfx-supply = <&pm8005_s2>;
};
&gpu_gx_gdsc {
/delete-property/ parent-supply;
+ parent-supply = <&pm8005_s2>;
};
&mdss_mdp {
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index ffb8525271ca..16ae8de73bf8 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -105,3 +105,279 @@
};
};
};
+
+&msm_gpu {
+ /delete-node/qcom,gpu-mempools;
+ /delete-node/qcom,gpu-pwrlevel-bins;
+
+ qcom,gpu-pwrlevel-bins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible="qcom,gpu-pwrlevel-bins";
+
+ qcom,gpu-pwrlevels-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <0>;
+
+ qcom,initial-pwrlevel = <4>;
+ qcom,ca-target-pwrlevel = <5>;
+
+ /* TURBO_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <780000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <11>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <750000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <7>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <146>;
+
+ qcom,initial-pwrlevel = <3>;
+ qcom,ca-target-pwrlevel = <4>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <700000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <7>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <163>;
+
+ qcom,initial-pwrlevel = <4>;
+ qcom,ca-target-pwrlevel = <5>;
+
+ /* TURBO_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <780000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <11>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <750000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <7>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
index affa06e71a95..fcffd447881a 100644
--- a/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
@@ -17,6 +17,10 @@
/delete-property/ qcom,dsi-display-active;
};
+&qrd_batterydata {
+ #include "fg-gen3-batterydata-mlp466076-3250mah.dtsi"
+};
+
&dsi_hx8399_truly_cmd {
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 348ba6f138fa..237152de5b0b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -338,17 +338,17 @@
iova-mem-region-io {
/* IO region is approximately 3 GB */
iova-region-name = "io";
- iova-region-start = <0xd911000>;
- iova-region-len = <0xd26ef000>;
+ iova-region-start = <0xda00000>;
+ iova-region-len = <0xd2500000>;
iova-region-id = <0x3>;
status = "ok";
};
iova-mem-qdss-region {
- /* qdss region is approximately 64K */
+ /* qdss region is approximately 1MB */
iova-region-name = "qdss";
iova-region-start = <0xd900000>;
- iova-region-len = <0x10000>;
+ iova-region-len = <0x100000>;
iova-region-id = <0x5>;
qdss-phy-addr = <0x16790000>;
status = "ok";
@@ -911,6 +911,9 @@
cam_ipe0: qcom,ipe0 {
cell-index = <0>;
compatible = "qcom,cam-ipe";
+ reg = <0xac87000 0x3000>;
+ reg-names = "ipe0_top";
+ reg-cam-base = <0x87000>;
regulator-names = "ipe0-vdd";
ipe0-vdd-supply = <&ipe_0_gdsc>;
clock-names = "ipe_0_ahb_clk",
@@ -938,6 +941,9 @@
cam_ipe1: qcom,ipe1 {
cell-index = <1>;
compatible = "qcom,cam-ipe";
+ reg = <0xac91000 0x3000>;
+ reg-names = "ipe1_top";
+ reg-cam-base = <0x91000>;
regulator-names = "ipe1-vdd";
ipe1-vdd-supply = <&ipe_1_gdsc>;
clock-names = "ipe_1_ahb_clk",
@@ -965,6 +971,9 @@
cam_bps: qcom,bps {
cell-index = <0>;
compatible = "qcom,cam-bps";
+ reg = <0xac6f000 0x3000>;
+ reg-names = "bps_top";
+ reg-cam-base = <0x6f000>;
regulator-names = "bps-vdd";
bps-vdd-supply = <&bps_gdsc>;
clock-names = "bps_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 0a7ac6f8978f..2e445e0a2bb1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -471,10 +471,10 @@
#size-cells = <2>;
ranges;
- hyp_region: hyp_region@85700000 {
+ hyp_region: hyp_region@85600000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x85700000 0 0x600000>;
+ reg = <0 0x85600000 0 0x800000>;
};
xbl_region: xbl_region@85e00000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 2e2de74999af..e77dcc357dea 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -311,17 +311,17 @@
iova-mem-region-io {
/* IO region is approximately 3 GB */
iova-region-name = "io";
- iova-region-start = <0xd911000>;
- iova-region-len = <0xd26ef000>;
+ iova-region-start = <0xda00000>;
+ iova-region-len = <0xd2500000>;
iova-region-id = <0x3>;
status = "ok";
};
iova-mem-qdss-region {
- /* qdss region is approximately 64K */
+ /* qdss region is approximately 1MB */
iova-region-name = "qdss";
iova-region-start = <0xd900000>;
- iova-region-len = <0x10000>;
+ iova-region-len = <0x100000>;
iova-region-id = <0x5>;
qdss-phy-addr = <0x16790000>;
status = "ok";
@@ -881,6 +881,9 @@
cam_ipe0: qcom,ipe0 {
cell-index = <0>;
compatible = "qcom,cam-ipe";
+ reg = <0xac87000 0x3000>;
+ reg-names = "ipe0_top";
+ reg-cam-base = <0x87000>;
regulator-names = "ipe0-vdd";
ipe0-vdd-supply = <&ipe_0_gdsc>;
clock-names = "ipe_0_ahb_clk",
@@ -908,6 +911,9 @@
cam_ipe1: qcom,ipe1 {
cell-index = <1>;
compatible = "qcom,cam-ipe";
+ reg = <0xac91000 0x3000>;
+ reg-names = "ipe1_top";
+ reg-cam-base = <0x91000>;
regulator-names = "ipe1-vdd";
ipe1-vdd-supply = <&ipe_1_gdsc>;
clock-names = "ipe_1_ahb_clk",
@@ -935,6 +941,9 @@
cam_bps: qcom,bps {
cell-index = <0>;
compatible = "qcom,cam-bps";
+ reg = <0xac6f000 0x3000>;
+ reg-names = "bps_top";
+ reg-cam-base = <0x6f000>;
regulator-names = "bps-vdd";
bps-vdd-supply = <&bps_gdsc>;
clock-names = "bps_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index 97cb9814b65d..c9669d958c2c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -248,17 +248,17 @@
iova-mem-region-io {
/* IO region is approximately 3 GB */
iova-region-name = "io";
- iova-region-start = <0xd911000>;
- iova-region-len = <0xd26ef000>;
+ iova-region-start = <0xda00000>;
+ iova-region-len = <0xd2500000>;
iova-region-id = <0x3>;
status = "ok";
};
iova-mem-qdss-region {
- /* qdss region is approximately 64K */
+ /* qdss region is approximately 1MB */
iova-region-name = "qdss";
iova-region-start = <0xd900000>;
- iova-region-len = <0x10000>;
+ iova-region-len = <0x100000>;
iova-region-id = <0x5>;
qdss-phy-addr = <0x16790000>;
status = "ok";
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 4c507f65277a..e590fd752eb6 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -367,6 +367,8 @@ CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SPECTRA_CAMERA=y
CONFIG_MSM_VIDC_V4L2=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 3e0a04172734..69cc0339c324 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -373,6 +373,8 @@ CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SPECTRA_CAMERA=y
CONFIG_MSM_VIDC_V4L2=y
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 47d05a6bc6c1..ddd48bc04dab 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1531,9 +1531,18 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (map && (map->attr & FASTRPC_ATTR_COHERENT))
continue;
- if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart)
- dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
- uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
+ if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client,
+ map->handle,
+ uint64_to_ptr(rpra[i].buf.pv),
+ rpra[i].buf.len,
+ ION_IOC_CLEAN_INV_CACHES);
+ else
+ dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
+ uint64_to_ptr(rpra[i].buf.pv
+ + rpra[i].buf.len));
+ }
}
PERF_END);
for (i = bufs; rpra && i < bufs + handles; i++) {
@@ -1542,11 +1551,6 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
}
- if (!ctx->fl->sctx->smmu.coherent) {
- PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
- dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
- PERF_END);
- }
bail:
return err;
}
@@ -1635,14 +1639,33 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
buf_page_start(rpra[i].buf.pv))
continue;
- if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
- dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
- (char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
+ if (!IS_CACHE_ALIGNED((uintptr_t)
+ uint64_to_ptr(rpra[i].buf.pv))) {
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client,
+ map->handle,
+ uint64_to_ptr(rpra[i].buf.pv),
+ sizeof(uintptr_t),
+ ION_IOC_CLEAN_INV_CACHES);
+ else
+ dmac_flush_range(
+ uint64_to_ptr(rpra[i].buf.pv), (char *)
+ uint64_to_ptr(rpra[i].buf.pv + 1));
+ }
+
end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
rpra[i].buf.len);
- if (!IS_CACHE_ALIGNED(end))
- dmac_flush_range((char *)end,
- (char *)end + 1);
+ if (!IS_CACHE_ALIGNED(end)) {
+ if (map && map->handle)
+ msm_ion_do_cache_op(ctx->fl->apps->client,
+ map->handle,
+ uint64_to_ptr(end),
+ sizeof(uintptr_t),
+ ION_IOC_CLEAN_INV_CACHES);
+ else
+ dmac_flush_range((char *)end,
+ (char *)end + 1);
+ }
}
}
@@ -1651,7 +1674,6 @@ static void inv_args(struct smq_invoke_ctx *ctx)
int i, inbufs, outbufs;
uint32_t sc = ctx->sc;
remote_arg64_t *rpra = ctx->rpra;
- int used = ctx->used;
inbufs = REMOTE_SCALARS_INBUFS(sc);
outbufs = REMOTE_SCALARS_OUTBUFS(sc);
@@ -1682,8 +1704,6 @@ static void inv_args(struct smq_invoke_ctx *ctx)
+ rpra[i].buf.len));
}
- if (rpra)
- dmac_inv_range(rpra, (char *)rpra + used);
}
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
@@ -2728,6 +2748,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
mutex_unlock(&fl->perf_mutex);
mutex_destroy(&fl->perf_mutex);
mutex_destroy(&fl->fl_map_mutex);
+ mutex_destroy(&fl->map_mutex);
kfree(fl);
return 0;
}
@@ -2741,7 +2762,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
pm_qos_remove_request(&fl->pm_qos_req);
if (fl->debugfs_file != NULL)
debugfs_remove(fl->debugfs_file);
- mutex_destroy(&fl->map_mutex);
fastrpc_file_free(fl);
file->private_data = NULL;
}
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index bfbd293f46df..6fb1d35fe8ad 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -184,10 +184,11 @@ static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
}
mask_info->update_buf = temp;
mask_info->update_buf_len = header_len + mask_size;
+ buf = temp;
}
memcpy(buf, &ctrl_pkt, header_len);
- if (mask_size > 0)
+ if (mask_size > 0 && mask_size <= LOG_MASK_SIZE)
memcpy(buf + header_len, mask->ptr, mask_size);
mutex_unlock(&mask->lock);
@@ -286,9 +287,16 @@ static void diag_send_event_mask_update(uint8_t peripheral)
} else {
mask_info->update_buf = temp;
mask_info->update_buf_len = temp_len;
+ buf = temp;
}
}
- memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+ if (num_bytes > 0 && num_bytes < mask_info->mask_len)
+ memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+ else {
+ pr_err("diag: num_bytes(%d) is not satisfying length condition\n",
+ num_bytes);
+ goto err;
+ }
write_len += num_bytes;
break;
default:
@@ -404,6 +412,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
} else {
mask_info->update_buf = temp;
mask_info->update_buf_len = temp_len;
+ buf = temp;
pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
__func__, mask_info->update_buf_len);
}
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index a0d1245354c4..5049758a3ec4 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -99,6 +99,10 @@
#define MAX_CHECK_LOOPS 500
#define POWER_CTRL_BIT_12 12
+#define SDE_RSC_MODE_0_VAL 0
+#define SDE_RSC_MODE_1_VAL 1
+#define MAX_MODE2_ENTRY_TRY 3
+
static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
{
struct sde_rsc_event *event;
@@ -401,28 +405,20 @@ static int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc,
if (rc)
pr_err("vdd reg is not enabled yet\n");
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
+ 0x3, rsc->debug_mode);
+
rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
return rc;
}
-static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
+static int sde_rsc_mode2_entry_trigger(struct sde_rsc_priv *rsc)
{
int rc;
int count, wrapper_status;
unsigned long reg;
- if (rsc->power_collapse_block)
- return -EINVAL;
-
- rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
- if (rc) {
- pr_err("vdd reg fast mode set failed rc:%d\n", rc);
- return rc;
- }
-
- rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
-
/* update qtimers to high during clk & video mode state */
if ((rsc->current_state == SDE_RSC_VID_STATE) ||
(rsc->current_state == SDE_RSC_CLK_STATE)) {
@@ -467,12 +463,86 @@ static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
usleep_range(10, 100);
}
+ return rc;
+}
+
+static void sde_rsc_reset_mode_0_1(struct sde_rsc_priv *rsc)
+{
+ u32 seq_busy, current_mode, curr_inst_addr;
+
+ seq_busy = dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_BUSY_DRV0,
+ rsc->debug_mode);
+ current_mode = dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
+ rsc->debug_mode);
+ curr_inst_addr = dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_PROGRAM_COUNTER,
+ rsc->debug_mode);
+ SDE_EVT32(seq_busy, current_mode, curr_inst_addr);
+
+ if (seq_busy && (current_mode == SDE_RSC_MODE_0_VAL ||
+ current_mode == SDE_RSC_MODE_1_VAL)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ /* unstick f1 qtimer */
+ wmb();
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
+ 0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
+ 0x0, rsc->debug_mode);
+ /* manually trigger f1 qtimer interrupt */
+ wmb();
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ /* unstick f0 qtimer */
+ wmb();
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
+ 0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
+ 0x0, rsc->debug_mode);
+ /* manually trigger f0 qtimer interrupt */
+ wmb();
+ }
+}
+
+static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
+{
+ int rc = 0, i;
+
+ if (rsc->power_collapse_block)
+ return -EINVAL;
+
+ rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
if (rc) {
- pr_err("mdss gdsc power down failed rc:%d\n", rc);
- SDE_EVT32(rc, SDE_EVTLOG_ERROR);
- goto end;
+ pr_err("vdd reg fast mode set failed rc:%d\n", rc);
+ return rc;
}
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
+ 0x7, rsc->debug_mode);
+ rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+
+ for (i = 0; i <= MAX_MODE2_ENTRY_TRY; i++) {
+ rc = sde_rsc_mode2_entry_trigger(rsc);
+ if (!rc)
+ break;
+
+ pr_err("try:%d mdss gdsc power down failed rc:%d\n", i, rc);
+ SDE_EVT32(rc, i, SDE_EVTLOG_ERROR);
+
+ /* avoid touching f1 qtimer for last try */
+ if (i != MAX_MODE2_ENTRY_TRY)
+ sde_rsc_reset_mode_0_1(rsc);
+ }
+
+ if (rc)
+ goto end;
+
if ((rsc->current_state == SDE_RSC_VID_STATE) ||
(rsc->current_state == SDE_RSC_CLK_STATE)) {
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
@@ -546,7 +616,7 @@ static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
reg = dss_reg_r(&rsc->wrapper_io,
SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
- reg &= ~(BIT(8) | BIT(0));
+ reg &= ~BIT(0);
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
reg, rsc->debug_mode);
/* make sure that solver mode is disabled */
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9a0d2e6dfcf6..dedeea35e5d1 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -613,6 +613,7 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
struct adreno_irq *irq_params = gpudev->irq;
irqreturn_t ret = IRQ_NONE;
unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
+ unsigned int shadow_status = 0;
int i;
atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -635,18 +636,29 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
* and change the fence back to ALLOW. Poll so that this can happen.
*/
if (kgsl_gmu_isenabled(device)) {
- do {
+ adreno_readreg(adreno_dev,
+ ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
+ &fence);
+
+ while (fence != 0) {
+ /* Wait for small time before trying again */
+ udelay(1);
adreno_readreg(adreno_dev,
ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
&fence);
- if (fence_retries == FENCE_RETRY_MAX) {
+ if (fence_retries == FENCE_RETRY_MAX && fence != 0) {
+ adreno_readreg(adreno_dev,
+ ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
+ &shadow_status);
+
KGSL_DRV_CRIT_RATELIMIT(device,
- "AHB fence stuck in ISR\n");
- return ret;
+ "AHB fence stuck in ISR: Shadow INT status=%8.8X\n",
+ shadow_status & irq_params->mask);
+ goto done;
}
fence_retries++;
- } while (fence != 0);
+ }
}
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
@@ -687,6 +699,7 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
int_bit);
+done:
/* Turn off the KEEPALIVE vote from earlier unless hard fault set */
if (gpudev->gpu_keepalive) {
/* If hard fault, then let snapshot turn off the keepalive */
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 9ca22fdc57c2..f1998bb7d96b 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -721,6 +721,7 @@ enum adreno_regs {
ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
ADRENO_REG_GMU_NMI_CONTROL_STATUS,
ADRENO_REG_GMU_CM3_CFG,
+ ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
ADRENO_REG_REGISTER_MAX,
};
@@ -1860,8 +1861,11 @@ static inline int adreno_perfcntr_active_oob_get(
ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
OOB_PERFCNTR_CHECK_MASK,
OOB_PERFCNTR_CLEAR_MASK);
- if (ret)
+ if (ret) {
+ adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
+ }
}
return ret;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 99f2a168ecd5..f9b62cd82828 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1706,6 +1706,10 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
struct device *dev = &gmu->pdev->dev;
int val;
+ /* Only trigger wakeup sequence if sleep sequence was done earlier */
+ if (!test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
+ return 0;
+
kgsl_gmu_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
if (!(val & 0x1))
dev_err_ratelimited(&gmu->pdev->dev,
@@ -1735,6 +1739,9 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
+ /* Clear sleep sequence flag as wakeup sequence is successful */
+ clear_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
+
/* Enable the power counter because it was disabled before slumber */
kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
@@ -1750,6 +1757,9 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
+ if (test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
+ return 0;
+
/* RSC sleep sequence is different on v1 */
if (adreno_is_a630v1(adreno_dev))
kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1791,6 +1801,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
+ set_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
return 0;
}
@@ -1809,15 +1820,13 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
unsigned int chipid = 0;
switch (boot_state) {
- case GMU_RESET:
- /* fall through */
case GMU_COLD_BOOT:
/* Turn on TCM retention */
kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
_load_gmu_rpmh_ucode(device);
- else if (boot_state != GMU_RESET) {
+ else {
ret = a6xx_rpmh_power_on_gpu(device);
if (ret)
return ret;
@@ -3774,6 +3783,8 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
A6XX_GMU_NMI_CONTROL_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_CM3_CFG,
A6XX_GMU_CM3_CFG),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
+ A6XX_GMU_RBBM_INT_UNMASKED_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
A6XX_RBBM_SECVID_TRUST_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 97b0cb2f4478..28d4005fbc52 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -49,8 +49,13 @@ static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
OOB_PREEMPTION_SET_MASK,
OOB_PREEMPTION_CHECK_MASK,
OOB_PREEMPTION_CLEAR_MASK);
- if (status)
+ if (status) {
+ adreno_set_gpu_fault(adreno_dev,
+ ADRENO_GMU_FAULT);
+ adreno_dispatcher_schedule(
+ KGSL_DEVICE(adreno_dev));
return;
+ }
}
}
@@ -227,6 +232,38 @@ static struct adreno_ringbuffer *a6xx_next_ringbuffer(
return NULL;
}
+#define GMU_ACTIVE_STATE_RETRY_MAX 100
+
+static int adreno_gmu_wait_for_active(struct adreno_device *adreno_dev)
+{
+ unsigned int reg, num_retries = 0;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!kgsl_gmu_isenabled(device))
+ return 0;
+
+ kgsl_gmu_regread(device,
+ A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
+
+ while (reg != GPU_HW_ACTIVE) {
+ /* Wait for small time before trying again */
+ udelay(5);
+ kgsl_gmu_regread(device,
+ A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
+
+ if (num_retries == GMU_ACTIVE_STATE_RETRY_MAX &&
+ reg != GPU_HW_ACTIVE) {
+ dev_err(adreno_dev->dev.dev,
+ "GMU failed to move to ACTIVE state: 0x%x\n",
+ reg);
+ return -ETIMEDOUT;
+ }
+ num_retries++;
+ }
+
+ return 0;
+}
+
void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -356,6 +393,23 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
upper_32_bits(gpuaddr),
FENCE_STATUS_WRITEDROPPED1_MASK);
+ /*
+ * Above fence writes will make sure GMU comes out of
+ * IFPC state if it was in IFPC state, but it doesn't
+ * guarantee that GMU FW actually moved to ACTIVE state
+ * i.e. wake-up from IFPC is complete.
+ * Wait for GMU to move to ACTIVE state before triggering
+ * preemption. This is required to make sure CP doesn't
+ * interrupt GMU during wake-up from IFPC.
+ */
+ if (adreno_gmu_wait_for_active(adreno_dev)) {
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+
adreno_dev->next_rb = next;
/* Start the timer to detect a stuck preemption */
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 10446f786363..a7b2e42a5ac5 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1318,7 +1318,7 @@ static int gmu_disable_gdsc(struct gmu_device *gmu)
return -ETIMEDOUT;
}
-static int gmu_suspend(struct kgsl_device *device)
+int gmu_suspend(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -1404,6 +1404,7 @@ int gmu_start(struct kgsl_device *device)
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
+ unsigned int boot_state = GMU_WARM_BOOT;
switch (device->state) {
case KGSL_STATE_INIT:
@@ -1440,12 +1441,21 @@ int gmu_start(struct kgsl_device *device)
gmu_enable_clks(gmu);
gmu_irq_enable(device);
+ /*
+ * If unrecovered is set that means last
+ * wakeup from SLUMBER state failed. Use GMU
+ * and HFI boot state as COLD as this is a
+ * boot after RESET.
+ */
+ if (gmu->unrecovered)
+ boot_state = GMU_COLD_BOOT;
+
ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
- GMU_WARM_BOOT, 0);
+ boot_state, 0);
if (ret)
goto error_gmu;
- ret = hfi_start(gmu, GMU_WARM_BOOT);
+ ret = hfi_start(gmu, boot_state);
if (ret)
goto error_gmu;
@@ -1461,7 +1471,7 @@ int gmu_start(struct kgsl_device *device)
gmu_irq_enable(device);
ret = gpudev->rpmh_gpu_pwrctrl(
- adreno_dev, GMU_FW_START, GMU_RESET, 0);
+ adreno_dev, GMU_FW_START, GMU_COLD_BOOT, 0);
if (ret)
goto error_gmu;
@@ -1478,7 +1488,7 @@ int gmu_start(struct kgsl_device *device)
hfi_stop(gmu);
ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
- GMU_RESET, 0);
+ GMU_COLD_BOOT, 0);
if (ret)
goto error_gmu;
@@ -1491,6 +1501,8 @@ int gmu_start(struct kgsl_device *device)
break;
}
+ /* Clear unrecovered as GMU start is successful */
+ gmu->unrecovered = false;
return ret;
error_gmu:
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 19fa9728ca67..130d4004bf77 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -102,6 +102,7 @@ enum gmu_flags {
GMU_HFI_ON = 2,
GMU_FAULT = 3,
GMU_DCVS_REPLAY = 4,
+ GMU_RSCC_SLEEP_SEQ_DONE = 5,
};
/**
@@ -139,13 +140,11 @@ struct rpmh_votes_t {
/*
* These are the different ways the GMU can boot. GMU_WARM_BOOT is waking up
- * from slumber. GMU_COLD_BOOT is booting for the first time. GMU_RESET
- * is a soft reset of the GMU.
+ * from slumber. GMU_COLD_BOOT is booting for the first time.
*/
enum gmu_boot {
GMU_WARM_BOOT = 0,
GMU_COLD_BOOT = 1,
- GMU_RESET = 2
};
enum gmu_load_mode {
@@ -213,6 +212,7 @@ enum gpu_idle_level {
* @ccl: CNOC BW scaling client
* @idle_level: Minimal GPU idle power level
* @fault_count: GMU fault count
+ * @unrecovered: Indicates whether GMU recovery failed or not
*/
struct gmu_device {
unsigned int ver;
@@ -247,6 +247,7 @@ struct gmu_device {
unsigned int ccl;
unsigned int idle_level;
unsigned int fault_count;
+ bool unrecovered;
};
void gmu_snapshot(struct kgsl_device *device);
@@ -256,6 +257,7 @@ void gmu_remove(struct kgsl_device *device);
int allocate_gmu_image(struct gmu_device *gmu, unsigned int size);
int gmu_start(struct kgsl_device *device);
void gmu_stop(struct kgsl_device *device);
+int gmu_suspend(struct kgsl_device *device);
int gmu_dcvs_set(struct gmu_device *gmu, unsigned int gpu_pwrlevel,
unsigned int bus_level);
#endif /* __KGSL_GMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 1baf20e38c2d..a4e966bd022d 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2795,6 +2795,25 @@ _aware(struct kgsl_device *device)
WARN_ONCE(1, "Failed to recover GMU\n");
if (device->snapshot)
device->snapshot->recovered = false;
+ /*
+ * On recovery failure, we are clearing
+ * GMU_FAULT bit and also not keeping
+ * the state as RESET to make sure any
+ * attempt to wake GMU/GPU after this
+ * is treated as a fresh start. But on
+ * recovery failure, GMU HS, clocks and
+ * IRQs are still ON/enabled because of
+ * which next GMU/GPU wakeup results in
+ * multiple warnings from GMU start as HS,
+ * clocks and IRQ were ON while doing a
+ * fresh start i.e. wake from SLUMBER.
+ *
+ * Suspend the GMU on recovery failure
+ * to make sure next attempt to wake up
+ * GMU/GPU is indeed a fresh start.
+ */
+ gmu_suspend(device);
+ gmu->unrecovered = true;
kgsl_pwrctrl_set_state(device, state);
} else {
if (device->snapshot)
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index a15ccdcf2dcc..d3c39f940f05 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1884,7 +1884,7 @@ int cam_fd_hw_mgr_init(struct device_node *of_node,
}
rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK,
- &g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ);
+ &g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ, 0);
if (rc) {
CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc);
goto detach_smmu;
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 2c364e0181e6..4256064dc1bb 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -153,4 +153,10 @@ int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
void __iomem *icp_base, bool debug);
+/**
+ * cam_hfi_queue_dump() - utility function to dump hfi queues
+ */
+void cam_hfi_queue_dump(void);
+
+
#endif /* _HFI_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index a0752f596c96..de72e85358d0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -47,6 +47,49 @@ unsigned int g_icp_mmu_hdl;
static DEFINE_MUTEX(hfi_cmd_q_mutex);
static DEFINE_MUTEX(hfi_msg_q_mutex);
+void cam_hfi_queue_dump(void)
+{
+ struct hfi_qtbl *qtbl;
+ struct hfi_qtbl_hdr *qtbl_hdr;
+ struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr;
+ struct hfi_mem_info *hfi_mem = NULL;
+ uint32_t *read_q, *read_ptr;
+ int i;
+
+ hfi_mem = &g_hfi->map;
+ if (!hfi_mem) {
+ CAM_ERR(CAM_HFI, "Unable to dump queues hfi memory is NULL");
+ return;
+ }
+
+ qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+ qtbl_hdr = &qtbl->q_tbl_hdr;
+ CAM_INFO(CAM_HFI,
+ "qtbl: version = %x size = %u num q = %u qhdr_size = %u",
+ qtbl_hdr->qtbl_version, qtbl_hdr->qtbl_size,
+ qtbl_hdr->qtbl_num_q, qtbl_hdr->qtbl_qhdr_size);
+
+ cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+ CAM_INFO(CAM_HFI, "cmd: size = %u r_idx = %u w_idx = %u addr = %x",
+ cmd_q_hdr->qhdr_q_size, cmd_q_hdr->qhdr_read_idx,
+ cmd_q_hdr->qhdr_write_idx, hfi_mem->cmd_q.iova);
+ read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+ read_ptr = (uint32_t *)(read_q + 0);
+ CAM_INFO(CAM_HFI, "CMD Q START");
+ for (i = 0; i < ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+ CAM_INFO(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+
+ msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+ CAM_INFO(CAM_HFI, "msg: size = %u r_idx = %u w_idx = %u addr = %x",
+ msg_q_hdr->qhdr_q_size, msg_q_hdr->qhdr_read_idx,
+ msg_q_hdr->qhdr_write_idx, hfi_mem->msg_q.iova);
+ read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+ read_ptr = (uint32_t *)(read_q + 0);
+ CAM_INFO(CAM_HFI, "MSG Q START");
+ for (i = 0; i < ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+ CAM_INFO(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+}
+
int hfi_write_cmd(void *cmd_ptr)
{
uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp;
@@ -92,7 +135,8 @@ int hfi_write_cmd(void *cmd_ptr)
(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
(read_idx - q->qhdr_write_idx);
if (empty_space <= size_in_words) {
- CAM_ERR(CAM_HFI, "failed");
+ CAM_ERR(CAM_HFI, "failed: empty space %u, size_in_words %u",
+ empty_space, size_in_words);
rc = -EIO;
goto err;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 16e97ea1f813..3c5690d00070 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1833,7 +1833,7 @@ static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
if (rc) {
- CAM_DBG(CAM_ICP, "Unable to read msg q");
+ CAM_DBG(CAM_ICP, "Unable to read msg q rc %d", rc);
} else {
read_len = read_len << BYTE_WORD_SHIFT;
msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
@@ -2323,6 +2323,7 @@ static int cam_icp_mgr_abort_handle(
if (!rem_jiffies) {
rc = -ETIMEDOUT;
CAM_ERR(CAM_ICP, "FW timeout/err in abort handle command");
+ cam_hfi_queue_dump();
}
kfree(abort_cmd);
@@ -2379,6 +2380,7 @@ static int cam_icp_mgr_destroy_handle(
if (icp_hw_mgr.a5_debug_type ==
HFI_DEBUG_MODE_QUEUE)
cam_icp_mgr_process_dbg_buf();
+ cam_hfi_queue_dump();
}
kfree(destroy_cmd);
return rc;
@@ -2680,6 +2682,7 @@ static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
if (!rem_jiffies) {
rc = -ETIMEDOUT;
CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
}
CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
@@ -2858,8 +2861,10 @@ static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async *hfi_cmd;
struct cam_hw_update_entry *hw_update_entries;
+ struct icp_frame_info *frame_info = NULL;
- request_id = *(uint64_t *)config_args->priv;
+ frame_info = (struct icp_frame_info *)config_args->priv;
+ request_id = frame_info->request_id;
hw_update_entries = config_args->hw_update_entries;
CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
@@ -2881,6 +2886,82 @@ static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
return rc;
}
+static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
+ uint32_t io_buf_addr)
+{
+ int rc = 0;
+ struct hfi_cmd_work_data *task_data;
+ struct hfi_cmd_ipebps_async ioconfig_cmd;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
+ struct crm_workq_task *task;
+ uint32_t size_in_words;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
+ ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
+ ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+ else
+ ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+ reinit_completion(&ctx_data->wait_complete);
+
+ ioconfig_cmd.num_fw_handles = 1;
+ ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
+ ioconfig_cmd.payload.indirect = io_buf_addr;
+ ioconfig_cmd.user_data1 = (uint64_t)ctx_data;
+ ioconfig_cmd.user_data2 = (uint64_t)0x0;
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)&ioconfig_cmd;
+ task_data->request_id = 0;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ size_in_words = (*(uint32_t *)task_data->data) >> 2;
+ CAM_INFO(CAM_ICP, "size_in_words %u", size_in_words);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
+ }
+
+ return rc;
+}
+
+static int cam_icp_mgr_send_recfg_io(struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *ioconfig_cmd, uint64_t req_id)
+{
+ int rc = 0;
+ struct hfi_cmd_work_data *task_data;
+ struct crm_workq_task *task;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)ioconfig_cmd;
+ task_data->request_id = req_id;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
{
int rc = 0;
@@ -2889,6 +2970,7 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_hw_config_args *config_args = config_hw_args;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct icp_frame_info *frame_info = NULL;
if (!hw_mgr || !config_args) {
CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
@@ -2912,11 +2994,23 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
return -EINVAL;
}
- req_id = *(uint64_t *)config_args->priv;
+ frame_info = (struct icp_frame_info *)config_args->priv;
+ req_id = frame_info->request_id;
idx = cam_icp_clk_idx_from_req_id(ctx_data, req_id);
ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
cam_icp_mgr_ipe_bps_clk_update(hw_mgr, ctx_data, idx);
+ CAM_DBG(CAM_ICP, "req_id %llu, io config %llu", req_id,
+ frame_info->io_config);
+
+ if (frame_info->io_config != 0) {
+ CAM_INFO(CAM_ICP, "Send recfg io");
+ rc = cam_icp_mgr_send_recfg_io(ctx_data,
+ &frame_info->hfi_cfg_io_cmd, req_id);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Fail to send reconfig io cmd");
+ }
+
rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
if (rc)
goto config_err;
@@ -3147,7 +3241,9 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
struct icp_cmd_generic_blob *blob;
struct cam_icp_hw_ctx_data *ctx_data;
uint32_t index;
+ size_t io_buf_size;
int rc = 0;
+ uint64_t pResource;
if (!blob_data || (blob_size == 0)) {
CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
@@ -3176,6 +3272,28 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
clk_info->compressed_bw);
break;
+ case CAM_ICP_CMD_GENERIC_BLOB_CFG_IO:
+ CAM_DBG(CAM_ICP, "CAM_ICP_CMD_GENERIC_BLOB_CFG_IO");
+ pResource = *((uint32_t *)blob_data);
+ if (copy_from_user(&ctx_data->icp_dev_io_info,
+ (void __user *)pResource,
+ sizeof(struct cam_icp_acquire_dev_info))) {
+ CAM_ERR(CAM_ICP, "Failed in copy from user");
+ return -EFAULT;
+ }
+ CAM_DBG(CAM_ICP, "buf handle %d",
+ ctx_data->icp_dev_io_info.io_config_cmd_handle);
+ rc = cam_mem_get_io_buf(
+ ctx_data->icp_dev_io_info.io_config_cmd_handle,
+ icp_hw_mgr.iommu_hdl,
+ blob->io_buf_addr, &io_buf_size);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed in blob update");
+ else
+ CAM_DBG(CAM_ICP, "io buf addr %llu",
+ *blob->io_buf_addr);
+ break;
+
default:
CAM_WARN(CAM_ICP, "Invalid blob type %d", blob_type);
break;
@@ -3186,7 +3304,8 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
static int cam_icp_process_generic_cmd_buffer(
struct cam_packet *packet,
struct cam_icp_hw_ctx_data *ctx_data,
- int32_t index)
+ int32_t index,
+ uint64_t *io_buf_addr)
{
int i, rc = 0;
struct cam_cmd_buf_desc *cmd_desc = NULL;
@@ -3194,6 +3313,7 @@ static int cam_icp_process_generic_cmd_buffer(
cmd_generic_blob.ctx = ctx_data;
cmd_generic_blob.frame_info_idx = index;
+ cmd_generic_blob.io_buf_addr = io_buf_addr;
cmd_desc = (struct cam_cmd_buf_desc *)
((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
@@ -3213,6 +3333,28 @@ static int cam_icp_process_generic_cmd_buffer(
return rc;
}
+static int cam_icp_mgr_process_cfg_io_cmd(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *ioconfig_cmd,
+ uint64_t request_id,
+ uint64_t io_config)
+{
+ ioconfig_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+ ioconfig_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+ else
+ ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+ ioconfig_cmd->num_fw_handles = 1;
+ ioconfig_cmd->fw_handles[0] = ctx_data->fw_handle;
+ ioconfig_cmd->payload.indirect = io_config;
+ ioconfig_cmd->user_data1 = (uint64_t)ctx_data;
+ ioconfig_cmd->user_data2 = request_id;
+
+ return 0;
+}
+
static int cam_icp_mgr_update_hfi_frame_process(
struct cam_icp_hw_ctx_data *ctx_data,
struct cam_packet *packet,
@@ -3220,6 +3362,7 @@ static int cam_icp_mgr_update_hfi_frame_process(
int32_t *idx)
{
int32_t index, rc;
+ struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
ctx_data->hfi_frame_process.bits);
@@ -3231,15 +3374,27 @@ static int cam_icp_mgr_update_hfi_frame_process(
ctx_data->hfi_frame_process.request_id[index] =
packet->header.request_id;
- rc = cam_icp_process_generic_cmd_buffer(packet, ctx_data, index);
+ ctx_data->hfi_frame_process.frame_info[index].request_id =
+ packet->header.request_id;
+ ctx_data->hfi_frame_process.frame_info[index].io_config = 0;
+ rc = cam_icp_process_generic_cmd_buffer(packet, ctx_data, index,
+ &ctx_data->hfi_frame_process.frame_info[index].io_config);
if (rc) {
clear_bit(index, ctx_data->hfi_frame_process.bitmap);
ctx_data->hfi_frame_process.request_id[index] = -1;
return rc;
}
+
+ if (ctx_data->hfi_frame_process.frame_info[index].io_config) {
+ hfi_cmd = (struct hfi_cmd_ipebps_async *)&ctx_data->
+ hfi_frame_process.frame_info[index].hfi_cfg_io_cmd;
+ rc = cam_icp_mgr_process_cfg_io_cmd(ctx_data, hfi_cmd,
+ packet->header.request_id, ctx_data->
+ hfi_frame_process.frame_info[index].io_config);
+ }
*idx = index;
- return 0;
+ return rc;
}
static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
@@ -3320,7 +3475,7 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
prepare_args->num_hw_update_entries = 1;
prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
- prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
+ prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx];
CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
packet->header.request_id, ctx_data->ctx_id);
@@ -3584,53 +3739,6 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
return rc;
}
-static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
- uint32_t io_buf_addr)
-{
- int rc = 0;
- struct hfi_cmd_work_data *task_data;
- struct hfi_cmd_ipebps_async ioconfig_cmd;
- unsigned long rem_jiffies;
- int timeout = 5000;
- struct crm_workq_task *task;
-
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task)
- return -ENOMEM;
-
- ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
- ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
- if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
- ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
- else
- ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
-
- reinit_completion(&ctx_data->wait_complete);
- ioconfig_cmd.num_fw_handles = 1;
- ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
- ioconfig_cmd.payload.indirect = io_buf_addr;
- ioconfig_cmd.user_data1 = (uint64_t)ctx_data;
- ioconfig_cmd.user_data2 = (uint64_t)0x0;
- task_data = (struct hfi_cmd_work_data *)task->payload;
- task_data->data = (void *)&ioconfig_cmd;
- task_data->request_id = 0;
- task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
- task->process_cb = cam_icp_mgr_process_cmd;
- rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
- CRM_TASK_PRIORITY_0);
- if (rc)
- return rc;
-
- rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
- msecs_to_jiffies((timeout)));
- if (!rem_jiffies) {
- rc = -ETIMEDOUT;
- CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
- }
-
- return rc;
-}
-
static int cam_icp_mgr_create_handle(uint32_t dev_type,
struct cam_icp_hw_ctx_data *ctx_data)
{
@@ -3665,6 +3773,7 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
if (!rem_jiffies) {
rc = -ETIMEDOUT;
CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
}
if (ctx_data->fw_handle == 0) {
@@ -3710,6 +3819,7 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
if (!rem_jiffies) {
rc = -ETIMEDOUT;
CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
}
return rc;
@@ -3843,6 +3953,8 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
+ CAM_DBG(CAM_ICP, "acquire io buf handle %d",
+ icp_dev_acquire_info->io_config_cmd_handle);
rc = cam_mem_get_io_buf(
icp_dev_acquire_info->io_config_cmd_handle,
hw_mgr->iommu_hdl,
@@ -4134,21 +4246,22 @@ static int cam_icp_mgr_create_wq(void)
int i;
rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
- &icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
+ &icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ,
+ 0);
if (rc) {
CAM_ERR(CAM_ICP, "unable to create a command worker");
goto cmd_work_failed;
}
rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
- &icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
+ &icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ, 0);
if (rc) {
CAM_ERR(CAM_ICP, "unable to create a message worker");
goto msg_work_failed;
}
rc = cam_req_mgr_workq_create("icp_timer_queue", ICP_WORKQ_NUM_TASK,
- &icp_hw_mgr.timer_work, CRM_WORKQ_USAGE_IRQ);
+ &icp_hw_mgr.timer_work, CRM_WORKQ_USAGE_IRQ, 0);
if (rc) {
CAM_ERR(CAM_ICP, "unable to create a timer worker");
goto timer_work_failed;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 8746ee264f31..3e3c0e08e187 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -126,6 +126,19 @@ struct clk_work_data {
};
/**
+ * struct icp_frame_info
+ * @request_id: request id
+ * @io_config: the address of io config
+ * @hfi_cfg_io_cmd: command struct to be sent to hfi
+ */
+struct icp_frame_info {
+ uint64_t request_id;
+ uint64_t io_config;
+ struct hfi_cmd_ipebps_async hfi_cfg_io_cmd;
+};
+
+
+/**
* struct hfi_frame_process_info
* @hfi_frame_cmd: Frame process command info
* @bitmap: Bitmap for hfi_frame_cmd
@@ -136,6 +149,7 @@ struct clk_work_data {
* @out_resource: Out sync info
* @fw_process_flag: Frame process flag
* @clk_info: Clock information for a request
+ * @frame_info: information needed to process request
*/
struct hfi_frame_process_info {
struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
@@ -149,6 +163,7 @@ struct hfi_frame_process_info {
uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
+ struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX];
};
/**
@@ -189,6 +204,7 @@ struct cam_ctx_clk_info {
* @clk_info: Current clock info of a context
* @watch_dog: watchdog timer handle
* @watch_dog_reset_counter: Counter for watch dog reset
+ * @icp_dev_io_info: io config resource
*/
struct cam_icp_hw_ctx_data {
void *context_priv;
@@ -208,16 +224,19 @@ struct cam_icp_hw_ctx_data {
struct cam_ctx_clk_info clk_info;
struct cam_req_mgr_timer *watch_dog;
uint32_t watch_dog_reset_counter;
+ struct cam_icp_acquire_dev_info icp_dev_io_info;
};
/**
* struct icp_cmd_generic_blob
* @ctx: Current context info
* @frame_info_idx: Index used for frame process info
+ * @io_buf_addr: pointer to io buffer address
*/
struct icp_cmd_generic_blob {
struct cam_icp_hw_ctx_data *ctx;
uint32_t frame_info_idx;
+ uint64_t *io_buf_addr;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index c1aa501e229d..e5c54d6a85ed 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -225,6 +225,8 @@ static int cam_ife_hw_mgr_start_hw_res(
CAM_ERR(CAM_ISP, "Can not start HW resources");
goto err;
}
+ CAM_DBG(CAM_ISP, "Start HW %d Res %d", hw_intf->hw_idx,
+ isp_hw_res->hw_res[i]->res_id);
} else {
CAM_ERR(CAM_ISP, "function null");
goto err;
@@ -366,7 +368,8 @@ static int cam_ife_mgr_csid_stop_hw(
isp_res = hw_mgr_res->hw_res[i];
if (isp_res->hw_intf->hw_idx != base_idx)
continue;
-
+ CAM_DBG(CAM_ISP, "base_idx %d res_id %d cnt %u",
+ base_idx, isp_res->res_id, cnt);
stop_res[cnt] = isp_res;
cnt++;
}
@@ -483,8 +486,8 @@ static void cam_ife_mgr_add_base_info(
"Add split id = %d for base idx = %d num_base=%d",
split_id, base_idx, ctx->num_base);
} else {
- /*Check if base index is alreay exist in the list */
- for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+ /*Check if base index already exists in the list */
+ for (i = 0; i < ctx->num_base; i++) {
if (ctx->base[i].idx == base_idx) {
if (split_id != CAM_ISP_HW_SPLIT_MAX &&
ctx->base[i].split_id ==
@@ -495,7 +498,7 @@ static void cam_ife_mgr_add_base_info(
}
}
- if (i == CAM_IFE_HW_NUM_MAX) {
+ if (i == ctx->num_base) {
ctx->base[ctx->num_base].split_id = split_id;
ctx->base[ctx->num_base].idx = base_idx;
ctx->num_base++;
@@ -845,7 +848,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_src(
}
ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
CAM_DBG(CAM_ISP,
- "acquire success res type :0x%x res id:0x%x",
+ "acquire success IFE:%d res type :0x%x res id:0x%x",
+ hw_intf->hw_idx,
ife_src_res->hw_res[i]->res_type,
ife_src_res->hw_res[i]->res_id);
@@ -871,29 +875,75 @@ err:
static int cam_ife_mgr_acquire_cid_res(
struct cam_ife_hw_mgr_ctx *ife_ctx,
struct cam_isp_in_port_info *in_port,
- uint32_t *cid_res_id,
+ struct cam_ife_hw_mgr_res **cid_res,
enum cam_ife_pix_path_res_id csid_path)
{
int rc = -1;
int i, j;
struct cam_ife_hw_mgr *ife_hw_mgr;
- struct cam_ife_hw_mgr_res *cid_res;
struct cam_hw_intf *hw_intf;
+ struct cam_ife_hw_mgr_res *cid_res_temp, *cid_res_iterator;
struct cam_csid_hw_reserve_resource_args csid_acquire;
+ uint32_t acquired_cnt = 0;
ife_hw_mgr = ife_ctx->hw_mgr;
+ *cid_res = NULL;
- rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
+ rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, cid_res);
if (rc) {
CAM_ERR(CAM_ISP, "No more free hw mgr resource");
- goto err;
+ goto end;
}
- cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
+
+ cid_res_temp = *cid_res;
csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
csid_acquire.in_port = in_port;
csid_acquire.res_id = csid_path;
+ CAM_DBG(CAM_ISP, "path %d", csid_path);
+
+ /* Try acquiring CID resource from previously acquired HW */
+ list_for_each_entry(cid_res_iterator, &ife_ctx->res_list_ife_cid,
+ list) {
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!cid_res_iterator->hw_res[i])
+ continue;
+ hw_intf = cid_res_iterator->hw_res[i]->hw_intf;
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &csid_acquire, sizeof(csid_acquire));
+ if (rc) {
+ CAM_DBG(CAM_ISP,
+ "No ife cid resource from hw %d",
+ hw_intf->hw_idx);
+ continue;
+ }
+
+ cid_res_temp->hw_res[acquired_cnt++] =
+ csid_acquire.node_res;
+
+ CAM_DBG(CAM_ISP,
+ "acquired csid(%s)=%d CID rsrc successfully",
+ (i == 0) ? "left" : "right",
+ hw_intf->hw_idx);
+
+ if (in_port->usage_type && acquired_cnt == 1 &&
+ csid_path == CAM_IFE_PIX_PATH_RES_IPP)
+ /* Continue to acquire Right */
+ continue;
+
+ if (acquired_cnt)
+ /*
+ * If successfully acquired CID from
+ * previously acquired HW, skip the next
+ * part
+ */
+ goto acquire_successful;
+ }
+ }
+
+ /* Acquire Left if not already acquired */
for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
if (!ife_hw_mgr->csid_devices[i])
continue;
@@ -903,31 +953,45 @@ static int cam_ife_mgr_acquire_cid_res(
sizeof(csid_acquire));
if (rc)
continue;
- else
+ else {
+ cid_res_temp->hw_res[acquired_cnt++] =
+ csid_acquire.node_res;
break;
+ }
}
if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
- CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
- goto err;
+ CAM_ERR(CAM_ISP, "Can not acquire ife cid resource for path %d",
+ csid_path);
+ goto put_res;
}
- cid_res->res_type = CAM_IFE_HW_MGR_RES_CID;
- cid_res->res_id = csid_acquire.node_res->res_id;
- cid_res->is_dual_vfe = in_port->usage_type;
- cid_res->hw_res[0] = csid_acquire.node_res;
- cid_res->hw_res[1] = NULL;
+acquire_successful:
+ CAM_DBG(CAM_ISP, "CID left acquired success is_dual %d",
+ in_port->usage_type);
+
+ cid_res_temp->res_type = CAM_IFE_HW_MGR_RES_CID;
/* CID(DT_ID) value of acquire device, require for path */
- *cid_res_id = csid_acquire.node_res->res_id;
+ cid_res_temp->res_id = csid_acquire.node_res->res_id;
+ cid_res_temp->is_dual_vfe = in_port->usage_type;
+ cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, cid_res);
- if (cid_res->is_dual_vfe) {
+ /*
+ * Acquire Right if not already acquired.
+ * Dual IFE for RDI is not currently supported.
+ */
+ if (cid_res_temp->is_dual_vfe && csid_path
+ == CAM_IFE_PIX_PATH_RES_IPP && acquired_cnt == 1) {
csid_acquire.node_res = NULL;
csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
csid_acquire.in_port = in_port;
- for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+ for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
if (!ife_hw_mgr->csid_devices[j])
continue;
+ if (j == cid_res_temp->hw_res[0]->hw_intf->hw_idx)
+ continue;
+
hw_intf = ife_hw_mgr->csid_devices[j];
rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
&csid_acquire, sizeof(csid_acquire));
@@ -940,16 +1004,20 @@ static int cam_ife_mgr_acquire_cid_res(
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
CAM_ERR(CAM_ISP,
"Can not acquire ife csid rdi resource");
- goto err;
+ goto end;
}
- cid_res->hw_res[1] = csid_acquire.node_res;
+ cid_res_temp->hw_res[1] = csid_acquire.node_res;
+ CAM_DBG(CAM_ISP, "CID right acquired success is_dual %d",
+ in_port->usage_type);
}
- cid_res->parent = &ife_ctx->res_list_ife_in;
+ cid_res_temp->parent = &ife_ctx->res_list_ife_in;
ife_ctx->res_list_ife_in.child[
- ife_ctx->res_list_ife_in.num_children++] = cid_res;
+ ife_ctx->res_list_ife_in.num_children++] = cid_res_temp;
return 0;
-err:
+put_res:
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, cid_res);
+end:
return rc;
}
@@ -966,35 +1034,25 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
struct cam_ife_hw_mgr_res *csid_res;
struct cam_ife_hw_mgr_res *cid_res;
struct cam_hw_intf *hw_intf;
- uint32_t cid_res_id;
struct cam_csid_hw_reserve_resource_args csid_acquire;
+ ife_hw_mgr = ife_ctx->hw_mgr;
/* get cid resource */
- rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
+ rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
CAM_IFE_PIX_PATH_RES_IPP);
if (rc) {
CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
- goto err;
+ goto end;
}
- ife_hw_mgr = ife_ctx->hw_mgr;
-
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
if (rc) {
CAM_ERR(CAM_ISP, "No more free hw mgr resource");
- goto err;
+ goto end;
}
- cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
-
- csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
- csid_acquire.res_id = CAM_IFE_PIX_PATH_RES_IPP;
- csid_acquire.cid = cid_res_id;
- csid_acquire.in_port = in_port;
- csid_acquire.out_port = in_port->data;
csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
csid_res->res_id = CAM_IFE_PIX_PATH_RES_IPP;
- csid_res->is_dual_vfe = in_port->usage_type;
if (in_port->usage_type)
csid_res->is_dual_vfe = 1;
@@ -1003,66 +1061,60 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
}
- list_for_each_entry(cid_res, &ife_ctx->res_list_ife_cid,
- list) {
- if (cid_res->res_id != cid_res_id)
- continue;
-
- for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
- if (!cid_res->hw_res[i])
- continue;
+ /* IPP resource needs to be from same HW as CID resource */
+ for (i = 0; i <= csid_res->is_dual_vfe; i++) {
+ CAM_DBG(CAM_ISP, "i %d is_dual %d", i, csid_res->is_dual_vfe);
+ csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+ csid_acquire.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+ csid_acquire.cid = cid_res->hw_res[i]->res_id;
+ csid_acquire.in_port = in_port;
+ csid_acquire.out_port = in_port->data;
+ csid_acquire.node_res = NULL;
- hw_intf = ife_hw_mgr->csid_devices[
- cid_res->hw_res[i]->hw_intf->hw_idx];
+ hw_intf = cid_res->hw_res[i]->hw_intf;
- csid_acquire.node_res = NULL;
- if (csid_res->is_dual_vfe) {
- if (i == CAM_ISP_HW_SPLIT_LEFT) {
- master_idx = hw_intf->hw_idx;
- csid_acquire.sync_mode =
- CAM_ISP_HW_SYNC_MASTER;
- } else {
- if (master_idx == -1) {
- CAM_ERR(CAM_ISP,
- "No Master found");
- goto err;
- }
- csid_acquire.sync_mode =
- CAM_ISP_HW_SYNC_SLAVE;
- csid_acquire.master_idx = master_idx;
+ if (csid_res->is_dual_vfe) {
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ master_idx = hw_intf->hw_idx;
+ csid_acquire.sync_mode =
+ CAM_ISP_HW_SYNC_MASTER;
+ } else {
+ if (master_idx == -1) {
+ CAM_ERR(CAM_ISP,
+ "No Master found");
+ goto put_res;
}
+ csid_acquire.sync_mode =
+ CAM_ISP_HW_SYNC_SLAVE;
+ csid_acquire.master_idx = master_idx;
}
-
- rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
- &csid_acquire, sizeof(csid_acquire));
- if (rc) {
- CAM_ERR(CAM_ISP,
- "Cannot acquire ife csid ipp resource");
- goto err;
- }
-
- csid_res->hw_res[i] = csid_acquire.node_res;
- CAM_DBG(CAM_ISP,
- "acquired csid(%s)=%d ipp rsrc successfully",
- (i == 0) ? "left" : "right",
- hw_intf->hw_idx);
-
}
- if (i == CAM_IFE_CSID_HW_NUM_MAX) {
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &csid_acquire, sizeof(csid_acquire));
+ if (rc) {
CAM_ERR(CAM_ISP,
- "Can not acquire ife csid ipp resource");
- goto err;
+ "Cannot acquire ife csid ipp resource");
+ goto put_res;
}
- csid_res->parent = cid_res;
- cid_res->child[cid_res->num_children++] = csid_res;
+ csid_res->hw_res[i] = csid_acquire.node_res;
+ CAM_DBG(CAM_ISP,
+ "acquired csid(%s)=%d ipp rsrc successfully",
+ (i == 0) ? "left" : "right",
+ hw_intf->hw_idx);
}
+ cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
+
+ csid_res->parent = cid_res;
+ cid_res->child[cid_res->num_children++] = csid_res;
CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
return 0;
-err:
+put_res:
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
+end:
return rc;
}
@@ -1099,111 +1151,88 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
struct cam_ife_hw_mgr_ctx *ife_ctx,
struct cam_isp_in_port_info *in_port)
{
- int rc = -1;
- int i, j;
+ int rc = -EINVAL;
+ int i;
struct cam_ife_hw_mgr *ife_hw_mgr;
struct cam_ife_hw_mgr_res *csid_res;
struct cam_ife_hw_mgr_res *cid_res;
struct cam_hw_intf *hw_intf;
struct cam_isp_out_port_info *out_port;
- uint32_t cid_res_id;
struct cam_csid_hw_reserve_resource_args csid_acquire;
+ enum cam_ife_pix_path_res_id path_type;
ife_hw_mgr = ife_ctx->hw_mgr;
for (i = 0; i < in_port->num_out_res; i++) {
out_port = &in_port->data[i];
- if (!cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+ path_type = cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+ out_port->res_type);
+ if (path_type == CAM_IFE_PIX_PATH_RES_MAX)
continue;
- /* get cid resource */
- rc = cam_ife_mgr_acquire_cid_res(ife_ctx,
- in_port, &cid_res_id,
- cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
- out_port->res_type));
- if (rc) {
- CAM_ERR(CAM_ISP,
- "Acquire IFE CID resource Failed");
- goto err;
+ /* get cid resource */
+ rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
+ path_type);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+ goto end;
}
+ /* For each RDI we need CID + PATH resource */
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
&csid_res);
if (rc) {
CAM_ERR(CAM_ISP, "No more free hw mgr resource");
- goto err;
+ goto end;
}
- cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
-
- /*
- * no need to check since we are doing one to one mapping
- * between the csid rdi type and out port rdi type
- */
memset(&csid_acquire, 0, sizeof(csid_acquire));
- csid_acquire.res_id =
- cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
- out_port->res_type);
+ csid_acquire.res_id = path_type;
csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
- csid_acquire.cid = cid_res_id;
+ csid_acquire.cid = cid_res->hw_res[0]->res_id;
csid_acquire.in_port = in_port;
csid_acquire.out_port = out_port;
csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ csid_acquire.node_res = NULL;
- list_for_each_entry(cid_res, &ife_ctx->res_list_ife_cid,
- list) {
- if (cid_res->res_id != cid_res_id)
- continue;
-
- for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
- if (!cid_res->hw_res[j])
- continue;
-
- csid_acquire.node_res = NULL;
-
- hw_intf = ife_hw_mgr->csid_devices[
- cid_res->hw_res[j]->hw_intf->hw_idx];
- rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
- &csid_acquire, sizeof(csid_acquire));
- if (rc) {
- CAM_DBG(CAM_ISP,
- "CSID Path reserve failed hw=%d rc=%d",
- hw_intf->hw_idx, rc);
- continue;
- }
+ hw_intf = cid_res->hw_res[0]->hw_intf;
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &csid_acquire, sizeof(csid_acquire));
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "CSID Path reserve failed hw=%d rc=%d cid=%d",
+ hw_intf->hw_idx, rc,
+ cid_res->hw_res[0]->res_id);
- /* RDI does not need Dual ISP. Break */
- break;
- }
+ goto put_res;
+ }
- if (j == CAM_ISP_HW_SPLIT_MAX &&
- csid_acquire.node_res == NULL) {
- CAM_ERR(CAM_ISP,
- "acquire csid rdi rsrc failed, cid %d",
- cid_res_id);
- goto err;
- }
+ if (csid_acquire.node_res == NULL) {
+ CAM_ERR(CAM_ISP, "Acquire CSID RDI rsrc failed");
- csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
- csid_res->res_id = csid_acquire.res_id;
- csid_res->is_dual_vfe = 0;
- csid_res->hw_res[0] = csid_acquire.node_res;
- csid_res->hw_res[1] = NULL;
- CAM_DBG(CAM_ISP, "acquire res %d",
- csid_acquire.res_id);
- csid_res->parent = cid_res;
- cid_res->child[cid_res->num_children++] =
- csid_res;
-
- /* Done with cid_res_id. Break */
- break;
+ goto put_res;
}
+
+ csid_res->res_type = (enum cam_ife_hw_mgr_res_type)
+ CAM_ISP_RESOURCE_PIX_PATH;
+ csid_res->res_id = csid_acquire.res_id;
+ csid_res->is_dual_vfe = 0;
+ csid_res->hw_res[0] = csid_acquire.node_res;
+ csid_res->hw_res[1] = NULL;
+ CAM_DBG(CAM_ISP, "acquire res %d",
+ csid_acquire.res_id);
+ csid_res->parent = cid_res;
+ cid_res->child[cid_res->num_children++] =
+ csid_res;
+ cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
}
return 0;
-err:
+put_res:
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
+end:
return rc;
}
@@ -1870,33 +1899,39 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
*/
if (i == ctx->num_base)
master_base_idx = ctx->base[0].idx;
+ CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);
- /* Stop the master CIDs first */
- cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
- master_base_idx, csid_halt_type);
+ /* Stop the master CSID path first */
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+ master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
- /* stop rest of the CIDs */
+ /* stop rest of the CSID paths */
for (i = 0; i < ctx->num_base; i++) {
- if (i == master_base_idx)
+ if (ctx->base[i].idx == master_base_idx)
continue;
- cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
- ctx->base[i].idx, csid_halt_type);
+ CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d",
+ ctx->base[i].idx, i, master_base_idx);
+
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+ ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
}
- /* Stop the master CSID path first */
- cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+ CAM_DBG(CAM_ISP, "Stopping master CID idx %d", master_base_idx);
+
+ /* Stop the master CIDs first */
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
master_base_idx, csid_halt_type);
- /* stop rest of the CSID paths */
+ /* stop rest of the CIDs */
for (i = 0; i < ctx->num_base; i++) {
- if (i == master_base_idx)
+ if (ctx->base[i].idx == master_base_idx)
continue;
-
- cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+ CAM_DBG(CAM_ISP, "Stopping CID idx %d i %d master %d",
+ ctx->base[i].idx, i, master_base_idx);
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
ctx->base[i].idx, csid_halt_type);
}
-
/* Deinit IFE CID */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
@@ -4218,7 +4253,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
/* Create Worker for ife_hw_mgr with 10 tasks */
rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
- &g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
+ &g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0);
if (rc < 0) {
CAM_ERR(CAM_ISP, "Unable to create worker");
goto end;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index e869e2bdc918..1444911b7a0d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -70,6 +70,12 @@ int cam_isp_add_change_base(
hw_entry[num_ent].handle = kmd_buf_info->handle;
hw_entry[num_ent].len = get_base.cmd.used_bytes;
hw_entry[num_ent].offset = kmd_buf_info->offset;
+ CAM_DBG(CAM_ISP,
+ "num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
kmd_buf_info->used_bytes += get_base.cmd.used_bytes;
kmd_buf_info->offset += get_base.cmd.used_bytes;
@@ -184,6 +190,16 @@ int cam_isp_add_cmd_buf_update(
return -EINVAL;
}
+ cmd_update.cmd_type = hw_cmd_type;
+ cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
+ cmd_update.cmd.size = kmd_buf_remain_size;
+ cmd_update.cmd.used_bytes = 0;
+ cmd_update.data = cmd_update_data;
+ CAM_DBG(CAM_ISP, "cmd_type %u cmd buffer 0x%pK, size %d",
+ cmd_update.cmd_type,
+ cmd_update.cmd.cmd_buf_addr,
+ cmd_update.cmd.size);
+
for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
if (!hw_mgr_res->hw_res[i])
continue;
@@ -193,14 +209,7 @@ int cam_isp_add_cmd_buf_update(
res = hw_mgr_res->hw_res[i];
cmd_update.res = res;
- cmd_update.cmd_type = hw_cmd_type;
- cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
- cmd_update.cmd.size = kmd_buf_remain_size;
- cmd_update.data = cmd_update_data;
-
- CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
- cmd_update.cmd.cmd_buf_addr,
- cmd_update.cmd.size);
+
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
cmd_update.cmd_type, &cmd_update,
@@ -280,6 +289,12 @@ int cam_isp_add_command_buffers(
hw_entry[num_ent].handle =
cmd_desc[i].mem_handle;
hw_entry[num_ent].offset = cmd_desc[i].offset;
+ CAM_DBG(CAM_ISP,
+ "Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
if (cmd_meta_data ==
CAM_ISP_PACKET_META_DMI_LEFT)
@@ -295,6 +310,12 @@ int cam_isp_add_command_buffers(
hw_entry[num_ent].handle =
cmd_desc[i].mem_handle;
hw_entry[num_ent].offset = cmd_desc[i].offset;
+ CAM_DBG(CAM_ISP,
+ "Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
if (cmd_meta_data ==
CAM_ISP_PACKET_META_DMI_RIGHT)
@@ -308,7 +329,12 @@ int cam_isp_add_command_buffers(
hw_entry[num_ent].handle =
cmd_desc[i].mem_handle;
hw_entry[num_ent].offset = cmd_desc[i].offset;
-
+ CAM_DBG(CAM_ISP,
+ "Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
if (cmd_meta_data == CAM_ISP_PACKET_META_DMI_COMMON)
hw_entry[num_ent].flags = 0x1;
@@ -647,6 +673,12 @@ int cam_isp_add_io_buffers(
prepare->hw_update_entries[num_ent].len = io_cfg_used_bytes;
prepare->hw_update_entries[num_ent].offset =
kmd_buf_info->offset;
+ CAM_DBG(CAM_ISP,
+ "num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ prepare->hw_update_entries[num_ent].handle,
+ prepare->hw_update_entries[num_ent].len,
+ prepare->hw_update_entries[num_ent].offset);
num_ent++;
kmd_buf_info->used_bytes += io_cfg_used_bytes;
@@ -741,6 +773,12 @@ int cam_isp_add_reg_update(
prepare->hw_update_entries[num_ent].len = reg_update_size;
prepare->hw_update_entries[num_ent].offset =
kmd_buf_info->offset;
+ CAM_DBG(CAM_ISP,
+ "num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ prepare->hw_update_entries[num_ent].handle,
+ prepare->hw_update_entries[num_ent].len,
+ prepare->hw_update_entries[num_ent].offset);
num_ent++;
kmd_buf_info->used_bytes += reg_update_size;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 3edae4a53d28..29b9e11a9365 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -20,6 +20,7 @@
#include "cam_soc_util.h"
#include "cam_io_util.h"
#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
/* Timeout value in msec */
#define IFE_CSID_TIMEOUT 1000
@@ -286,67 +287,49 @@ static int cam_ife_csid_get_format_ipp(
}
static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
- struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
- uint32_t res_type)
+ struct cam_isp_resource_node **res, int32_t vc, uint32_t dt)
{
- int rc = 0;
struct cam_ife_csid_cid_data *cid_data;
- uint32_t i = 0, j = 0;
+ uint32_t i = 0;
+ *res = NULL;
+
+ /* Return already reserved CID if the VC/DT matches */
for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
if (csid_hw->cid_res[i].res_state >=
CAM_ISP_RESOURCE_STATE_RESERVED) {
cid_data = (struct cam_ife_csid_cid_data *)
csid_hw->cid_res[i].res_priv;
- if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
- if (cid_data->tpg_set) {
- cid_data->cnt++;
- *res = &csid_hw->cid_res[i];
- break;
- }
- } else {
- if (cid_data->vc == vc && cid_data->dt == dt) {
- cid_data->cnt++;
- *res = &csid_hw->cid_res[i];
- break;
- }
+ if (cid_data->vc == vc && cid_data->dt == dt) {
+ cid_data->cnt++;
+ *res = &csid_hw->cid_res[i];
+ return 0;
}
}
}
- if (i == CAM_IFE_CSID_CID_RES_MAX) {
- if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
- CAM_ERR(CAM_ISP, "CSID:%d TPG CID not available",
- csid_hw->hw_intf->hw_idx);
- rc = -EINVAL;
- }
-
- for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
- if (csid_hw->cid_res[j].res_state ==
- CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- cid_data = (struct cam_ife_csid_cid_data *)
- csid_hw->cid_res[j].res_priv;
- cid_data->vc = vc;
- cid_data->dt = dt;
- cid_data->cnt = 1;
- csid_hw->cid_res[j].res_state =
- CAM_ISP_RESOURCE_STATE_RESERVED;
- *res = &csid_hw->cid_res[j];
- CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
- csid_hw->hw_intf->hw_idx,
- csid_hw->cid_res[j].res_id);
- break;
- }
- }
-
- if (j == CAM_IFE_CSID_CID_RES_MAX) {
- CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
- csid_hw->hw_intf->hw_idx);
- rc = -EINVAL;
+ for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+ if (csid_hw->cid_res[i].res_state ==
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ cid_data = (struct cam_ife_csid_cid_data *)
+ csid_hw->cid_res[i].res_priv;
+ cid_data->vc = vc;
+ cid_data->dt = dt;
+ cid_data->cnt = 1;
+ csid_hw->cid_res[i].res_state =
+ CAM_ISP_RESOURCE_STATE_RESERVED;
+ *res = &csid_hw->cid_res[i];
+ CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->cid_res[i].res_id);
+ return 0;
}
}
- return rc;
+ CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
+ csid_hw->hw_intf->hw_idx);
+
+ return -EINVAL;
}
@@ -547,6 +530,7 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
{
int rc = 0;
struct cam_ife_csid_cid_data *cid_data;
+ uint32_t camera_hw_version;
CAM_DBG(CAM_ISP,
"CSID:%d res_sel:0x%x Lane type:%d lane_num:%d dt:%d vc:%d",
@@ -614,12 +598,40 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
goto end;
}
- if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_PHY_3 &&
- csid_hw->hw_intf->hw_idx != 2) {
+ if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+ CAM_ERR(CAM_ISP,
+ "CSID%d reserve cnt reached max",
+ csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
goto end;
}
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to get HW version rc:%d", rc);
+ goto end;
+ }
+ CAM_DBG(CAM_ISP, "HW version: %d", camera_hw_version);
+
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_NONE:
+ case CAM_CPAS_TITAN_MAX:
+ CAM_ERR(CAM_ISP, "Invalid HW version: %d", camera_hw_version);
+ break;
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_170_V120:
+ if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_PHY_3 &&
+ csid_hw->hw_intf->hw_idx != 2) {
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ break;
+ }
+ CAM_DBG(CAM_ISP, "Reserve_cnt %u", csid_hw->csi2_reserve_cnt);
+
if (csid_hw->csi2_reserve_cnt) {
/* current configure res type should match requested res type */
if (csid_hw->res_type != cid_reserv->in_port->res_type) {
@@ -652,12 +664,53 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
}
}
+ switch (cid_reserv->res_id) {
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ if (csid_hw->ipp_res.res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource not available",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ if (csid_hw->rdi_res[cid_reserv->res_id].res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d RDI:%d resource not available",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID%d: Invalid csid path",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_ife_csid_cid_get(csid_hw,
+ &cid_reserv->node_res,
+ cid_reserv->in_port->vc,
+ cid_reserv->in_port->dt);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CSID:%d CID Reserve failed res_type %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->res_type);
+ goto end;
+ }
+ cid_data = (struct cam_ife_csid_cid_data *)
+ cid_reserv->node_res->res_priv;
+
if (!csid_hw->csi2_reserve_cnt) {
csid_hw->res_type = cid_reserv->in_port->res_type;
- /* Take the first CID resource*/
- csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
- cid_data = (struct cam_ife_csid_cid_data *)
- csid_hw->cid_res[0].res_priv;
csid_hw->csi2_rx_cfg.lane_cfg =
cid_reserv->in_port->lane_cfg;
@@ -699,71 +752,13 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
csid_hw->csi2_rx_cfg.phy_sel =
(cid_reserv->in_port->res_type & 0xFF) - 1;
}
-
- cid_data->vc = cid_reserv->in_port->vc;
- cid_data->dt = cid_reserv->in_port->dt;
- cid_data->cnt = 1;
- cid_reserv->node_res = &csid_hw->cid_res[0];
- csid_hw->csi2_reserve_cnt++;
-
- CAM_DBG(CAM_ISP,
- "CSID:%d CID :%d resource acquired successfully",
- csid_hw->hw_intf->hw_idx,
- cid_reserv->node_res->res_id);
- } else {
- switch (cid_reserv->res_id) {
- case CAM_IFE_PIX_PATH_RES_IPP:
- if (csid_hw->ipp_res.res_state !=
- CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CAM_DBG(CAM_ISP,
- "CSID:%d IPP resource not available",
- csid_hw->hw_intf->hw_idx);
- rc = -EINVAL;
- goto end;
- }
- break;
- case CAM_IFE_PIX_PATH_RES_RDI_0:
- case CAM_IFE_PIX_PATH_RES_RDI_1:
- case CAM_IFE_PIX_PATH_RES_RDI_2:
- case CAM_IFE_PIX_PATH_RES_RDI_3:
- if (csid_hw->rdi_res[cid_reserv->res_id].res_state !=
- CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CAM_DBG(CAM_ISP,
- "CSID:%d RDI:%d resource not available",
- csid_hw->hw_intf->hw_idx,
- cid_reserv->res_id);
- rc = -EINVAL;
- goto end;
- }
- break;
- default:
- CAM_ERR(CAM_ISP, "CSID%d: Invalid csid path",
- csid_hw->hw_intf->hw_idx);
- rc = -EINVAL;
- goto end;
- }
-
- rc = cam_ife_csid_cid_get(csid_hw,
- &cid_reserv->node_res,
- cid_reserv->in_port->vc,
- cid_reserv->in_port->dt,
- cid_reserv->in_port->res_type);
- /* if success then increment the reserve count */
- if (!rc) {
- if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
- CAM_ERR(CAM_ISP,
- "CSID%d reserve cnt reached max",
- csid_hw->hw_intf->hw_idx);
- rc = -EINVAL;
- } else {
- csid_hw->csi2_reserve_cnt++;
- CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
- csid_hw->hw_intf->hw_idx,
- cid_reserv->node_res->res_id);
- }
- }
}
+ csid_hw->csi2_reserve_cnt++;
+ CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->node_res->res_id);
+
end:
return rc;
}
@@ -2415,12 +2410,24 @@ static int cam_ife_csid_stop(void *hw_priv,
return -EINVAL;
}
csid_stop = (struct cam_csid_hw_stop_args *) stop_args;
+
+ if (!csid_stop->num_res) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
+ csid_hw->hw_intf->hw_idx,
+ csid_stop->num_res);
/* Stop the resource first */
for (i = 0; i < csid_stop->num_res; i++) {
res = csid_stop->node_res[i];
+ CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id);
switch (res->res_type) {
case CAM_ISP_RESOURCE_CID:
if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
@@ -2777,6 +2784,7 @@ int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
{
int rc = -EINVAL;
uint32_t i;
+ uint32_t num_paths;
struct cam_ife_csid_path_cfg *path_data;
struct cam_ife_csid_cid_data *cid_data;
struct cam_hw_info *csid_hw_info;
@@ -2828,8 +2836,10 @@ int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
ife_csid_hw->hw_intf->hw_ops.write = cam_ife_csid_write;
ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
- /*Initialize the CID resoure */
- for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+ num_paths = ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix +
+ ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+ /* Initialize the CID resource */
+ for (i = 0; i < num_paths; i++) {
ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
ife_csid_hw->cid_res[i].res_id = i;
ife_csid_hw->cid_res[i].res_state =
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index 97d076a78c04..fdeee545bc6c 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -1171,7 +1171,7 @@ static int cam_jpeg_setup_workqs(void)
"jpeg_command_queue",
CAM_JPEG_WORKQ_NUM_TASK,
&g_jpeg_hw_mgr.work_process_frame,
- CRM_WORKQ_USAGE_NON_IRQ);
+ CRM_WORKQ_USAGE_NON_IRQ, 0);
if (rc) {
CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
goto work_process_frame_failed;
@@ -1181,7 +1181,7 @@ static int cam_jpeg_setup_workqs(void)
"jpeg_message_queue",
CAM_JPEG_WORKQ_NUM_TASK,
&g_jpeg_hw_mgr.work_process_irq_cb,
- CRM_WORKQ_USAGE_IRQ);
+ CRM_WORKQ_USAGE_IRQ, 0);
if (rc) {
CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
goto work_process_irq_cb_failed;
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index a60661e04cd3..0f34c9f15b56 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -989,7 +989,8 @@ int cam_lrme_mgr_register_device(
CAM_DBG(CAM_LRME, "Create submit workq for %s", buf);
rc = cam_req_mgr_workq_create(buf,
CAM_LRME_WORKQ_NUM_TASK,
- &hw_device->work, CRM_WORKQ_USAGE_NON_IRQ);
+ &hw_device->work, CRM_WORKQ_USAGE_NON_IRQ,
+ 0);
if (rc) {
CAM_ERR(CAM_LRME,
"Unable to create a worker, rc=%d", rc);
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
index da42c84f3835..ec392f5d0752 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,8 +28,6 @@
#include "cam_mem_mgr_api.h"
#include "cam_smmu_api.h"
-#define CAM_LRME_HW_WORKQ_NUM_TASK 30
-
static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core,
struct cam_hw_info *lrme_hw)
{
@@ -122,7 +120,7 @@ static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
rc = cam_req_mgr_workq_create("cam_lrme_hw_worker",
CAM_LRME_HW_WORKQ_NUM_TASK,
- &lrme_core->work, CRM_WORKQ_USAGE_IRQ);
+ &lrme_core->work, CRM_WORKQ_USAGE_IRQ, 0);
if (rc) {
CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc);
goto free_memory;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 4602d6c135a5..060aaf2c3268 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -2324,6 +2324,7 @@ end:
int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
{
int rc = 0;
+ int wq_flag = 0;
char buf[128];
struct cam_create_dev_hdl root_dev;
struct cam_req_mgr_core_session *cam_session;
@@ -2394,8 +2395,9 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
/* Create worker for current link */
snprintf(buf, sizeof(buf), "%x-%x",
link_info->session_hdl, link->link_hdl);
+ wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL;
rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
- &link->workq, CRM_WORKQ_USAGE_NON_IRQ);
+ &link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag);
if (rc < 0) {
CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
__cam_req_mgr_destroy_link_info(link);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 066efd6fa3e7..3798ef8e6d5f 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -178,9 +178,10 @@ end:
}
int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
- struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq)
+ struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
+ int flags)
{
- int32_t i;
+ int32_t i, wq_flags = 0, max_active_tasks = 0;
struct crm_workq_task *task;
struct cam_req_mgr_core_workq *crm_workq = NULL;
char buf[128] = "crm_workq-";
@@ -192,10 +193,17 @@ int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
if (crm_workq == NULL)
return -ENOMEM;
+ wq_flags |= WQ_UNBOUND;
+ if (flags & CAM_WORKQ_FLAG_HIGH_PRIORITY)
+ wq_flags |= WQ_HIGHPRI;
+
+ if (flags & CAM_WORKQ_FLAG_SERIAL)
+ max_active_tasks = 1;
+
strlcat(buf, name, sizeof(buf));
CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
crm_workq->job = alloc_workqueue(buf,
- WQ_HIGHPRI | WQ_UNBOUND, 0, NULL);
+ wq_flags, max_active_tasks, NULL);
if (!crm_workq->job) {
kfree(crm_workq);
return -ENOMEM;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index eb3b804f97a9..af76ae467346 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,15 @@
#include "cam_req_mgr_core.h"
+/* Flag to create a high priority workq */
+#define CAM_WORKQ_FLAG_HIGH_PRIORITY (1 << 0)
+
+/* This flag ensures that only one task from a
+ * given workq will execute at any given point
+ * in time on any given CPU.
+ */
+#define CAM_WORKQ_FLAG_SERIAL (1 << 1)
+
/* Task priorities, lower the number higher the priority*/
enum crm_task_priority {
CRM_TASK_PRIORITY_0,
@@ -101,11 +110,14 @@ struct cam_req_mgr_core_workq {
* @num_task : Num_tasks to be allocated for workq
* @workq : Double pointer worker
* @in_irq : Set to one if workq might be used in irq context
+ * @flags : Bitwise OR of flags for workq behavior.
+ * e.g. CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL
* This function will allocate and create workqueue and pass
* the workq pointer to caller.
*/
int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
- struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq);
+ struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
+ int flags);
/**
* cam_req_mgr_workq_destroy()
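For context, a minimal usage sketch of the extended cam_req_mgr_workq_create() signature above (not part of this patch); the workq name, task count, and wrapper function below are placeholders, and the flag values mirror the cam_req_mgr_link() change earlier in this series.

/*
 * Illustrative sketch only: create a serialized, high-priority
 * workq using the new flags argument. "example_workq" and the
 * task count of 10 are placeholder values.
 */
static struct cam_req_mgr_core_workq *example_workq;

static int example_create_workq(void)
{
	int flags = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL;

	/* CAM_WORKQ_FLAG_SERIAL maps to max_active = 1 in alloc_workqueue() */
	return cam_req_mgr_workq_create("example_workq", 10,
		&example_workq, CRM_WORKQ_USAGE_NON_IRQ, flags);
}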
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index a34d70c50a94..da8ff21cd06f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -53,6 +53,7 @@ int32_t cam_actuator_construct_default_power_setting(
free_power_settings:
kfree(power_info->power_setting);
+ power_info->power_setting = NULL;
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 96fdfeb1b4ba..733c9e870637 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -252,6 +252,8 @@ static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
a_ctrl->io_master_info.cci_client = NULL;
kfree(power_info->power_setting);
kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
kfree(a_ctrl->soc_info.soc_private);
kfree(a_ctrl->i2c_data.per_frame);
a_ctrl->i2c_data.per_frame = NULL;
@@ -284,6 +286,8 @@ static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
kfree(power_info->power_setting);
kfree(power_info->power_down_setting);
kfree(a_ctrl->soc_info.soc_private);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
a_ctrl->soc_info.soc_private = NULL;
kfree(a_ctrl);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index 9b748268a448..f0efd4cd41cf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -191,25 +191,11 @@ static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
{
- int i = 0;
-
if (!flash_ctrl) {
CAM_ERR(CAM_FLASH, "Flash control Null");
return -EINVAL;
}
- for (i = 0; i < flash_ctrl->flash_num_sources; i++)
- if (flash_ctrl->flash_trigger[i])
- cam_res_mgr_led_trigger_event(
- flash_ctrl->flash_trigger[i],
- LED_OFF);
-
- for (i = 0; i < flash_ctrl->torch_num_sources; i++)
- if (flash_ctrl->torch_trigger[i])
- cam_res_mgr_led_trigger_event(
- flash_ctrl->torch_trigger[i],
- LED_OFF);
-
if (flash_ctrl->switch_trigger)
cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger,
LED_SWITCH_OFF);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index dfcb9fcba4b7..6f77e0ecb93e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -55,6 +55,7 @@ int32_t cam_ois_construct_default_power_setting(
free_power_settings:
kfree(power_info->power_setting);
+ power_info->power_setting = NULL;
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
index d742acf7813d..5d16a4e54d04 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -240,6 +240,8 @@ static int cam_ois_i2c_driver_remove(struct i2c_client *client)
kfree(power_info->power_setting);
kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
kfree(o_ctrl->soc_info.soc_private);
kfree(o_ctrl);
@@ -341,6 +343,8 @@ static int cam_ois_platform_driver_remove(struct platform_device *pdev)
kfree(power_info->power_setting);
kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
kfree(o_ctrl->soc_info.soc_private);
kfree(o_ctrl->io_master_info.cci_client);
kfree(o_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 2133932dde57..6fe051aa048c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -599,7 +599,8 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
} else {
CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d",
cmd->handle_type);
- return -EINVAL;
+ rc = -EINVAL;
+ goto release_mutex;
}
pu = power_info->power_setting;
@@ -653,7 +654,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
}
CAM_INFO(CAM_SENSOR,
- "Probe Succees,slot:%d,slave_addr:0x%x,sensor_id:0x%x",
+ "Probe success,slot:%d,slave_addr:0x%x,sensor_id:0x%x",
s_ctrl->soc_info.index,
s_ctrl->sensordata->slave_info.sensor_slave_addr,
s_ctrl->sensordata->slave_info.sensor_id);
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 517b7dfe8128..55896f497c59 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -46,6 +46,7 @@ int cam_sync_create(int32_t *sync_obj, const char *name)
}
*sync_obj = idx;
+ CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return rc;
@@ -170,21 +171,24 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
INIT_LIST_HEAD(&sync_list);
if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
- CAM_ERR(CAM_SYNC, "Error: Out of range sync obj");
+ CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
+ sync_obj, CAM_SYNC_MAX_OBJS);
return -EINVAL;
}
row = sync_dev->sync_table + sync_obj;
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
if (row->state == CAM_SYNC_STATE_INVALID) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
CAM_ERR(CAM_SYNC,
"Error: accessing an uninitialized sync obj = %d",
sync_obj);
return -EINVAL;
}
- spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
if (row->type == CAM_SYNC_TYPE_GROUP) {
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
- CAM_ERR(CAM_SYNC, "Error: Signaling a GROUP sync object = %d",
+ CAM_ERR(CAM_SYNC,
+ "Error: Signaling a GROUP sync object = %d",
sync_obj);
return -EINVAL;
}
@@ -368,6 +372,7 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
int cam_sync_destroy(int32_t sync_obj)
{
+ CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
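A brief sketch (not from this patch) of the locking pattern the cam_sync_signal() change above adopts: a row's state is read and validated only while its row spinlock is held. The helper name is illustrative; the sync_dev fields are those used in the hunk above.

/* Illustrative helper only; everything but the sync_dev fields is a placeholder. */
static int example_validate_sync_obj(int32_t sync_obj)
{
	struct sync_table_row *row = sync_dev->sync_table + sync_obj;
	int rc = 0;

	/* State is checked under the per-row lock, as in cam_sync_signal() above */
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID)
		rc = -EINVAL;
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return rc;
}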
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index ed69829575bb..43bce513bff2 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -51,8 +51,9 @@ int cam_sync_init_object(struct sync_table_row *table,
init_completion(&row->signaled);
INIT_LIST_HEAD(&row->callback_list);
INIT_LIST_HEAD(&row->user_payload_list);
- CAM_DBG(CAM_SYNC, "Sync object Initialised: sync_id:%u row_state:%u ",
- row->sync_id, row->state);
+ CAM_DBG(CAM_SYNC,
+ "row name:%s sync_id:%i [idx:%u] row_state:%u ",
+ row->name, row->sync_id, idx, row->state);
return 0;
}
@@ -74,9 +75,11 @@ uint32_t cam_sync_util_get_group_object_state(struct sync_table_row *table,
* counts of error, active and success states of all children objects
*/
for (i = 0; i < num_objs; i++) {
+ spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
child_row = table + sync_objs[i];
switch (child_row->state) {
case CAM_SYNC_STATE_SIGNALED_ERROR:
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
return CAM_SYNC_STATE_SIGNALED_ERROR;
case CAM_SYNC_STATE_SIGNALED_SUCCESS:
success_count++;
@@ -87,8 +90,10 @@ uint32_t cam_sync_util_get_group_object_state(struct sync_table_row *table,
default:
CAM_ERR(CAM_SYNC,
"Invalid state of child object during merge");
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
return CAM_SYNC_STATE_SIGNALED_ERROR;
}
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
}
if (active_count)
@@ -209,12 +214,16 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
return -EINVAL;
+ CAM_DBG(CAM_SYNC,
+ "row name:%s sync_id:%i [idx:%u] row_state:%u",
+ row->name, row->sync_id, idx, row->state);
+
spin_lock_bh(&sync_dev->row_spinlocks[idx]);
if (row->state == CAM_SYNC_STATE_INVALID) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
CAM_ERR(CAM_SYNC,
"Error: accessing an uninitialized sync obj: idx = %d",
idx);
- spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -EINVAL;
}
row->state = CAM_SYNC_STATE_INVALID;
@@ -252,9 +261,9 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
if (child_row->state == CAM_SYNC_STATE_INVALID) {
+ list_del_init(&child_info->list);
spin_unlock_bh(&sync_dev->row_spinlocks[
child_info->sync_id]);
- list_del_init(&child_info->list);
kfree(child_info);
continue;
}
@@ -262,9 +271,8 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
cam_sync_util_cleanup_parents_list(child_row,
SYNC_LIST_CLEAN_ONE, idx);
- spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
-
list_del_init(&child_info->list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
kfree(child_info);
}
@@ -277,9 +285,9 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
if (parent_row->state == CAM_SYNC_STATE_INVALID) {
+ list_del_init(&parent_info->list);
spin_unlock_bh(&sync_dev->row_spinlocks[
parent_info->sync_id]);
- list_del_init(&parent_info->list);
kfree(parent_info);
continue;
}
@@ -287,9 +295,8 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
cam_sync_util_cleanup_children_list(parent_row,
SYNC_LIST_CLEAN_ONE, idx);
- spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
-
list_del_init(&parent_info->list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
kfree(parent_info);
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 30ab0754c47f..db2629d2a2f9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,6 +171,24 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
+ if (patch_desc[i].src_offset >= src_buf_size) {
+ CAM_ERR_RATE_LIMIT(CAM_UTIL,
+ "Inval src offset:0x%x src len:0x%x reqid:%lld",
+ patch_desc[i].src_offset,
+ (unsigned int)src_buf_size,
+ packet->header.request_id);
+ return -EINVAL;
+ }
+
+ if (patch_desc[i].dst_offset >= dst_buf_len) {
+ CAM_ERR_RATE_LIMIT(CAM_UTIL,
+ "Inval dst offset:0x%x dst len:0x%x reqid:%lld",
+ patch_desc[i].dst_offset,
+ (unsigned int)dst_buf_len,
+ packet->header.request_id);
+ return -EINVAL;
+ }
+
dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr +
patch_desc[i].dst_offset);
temp += patch_desc[i].src_offset;
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index e58fa2e466c9..696c043d8de4 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -270,6 +270,9 @@ static int mem_share_do_ramdump(void)
{
int i = 0, ret;
char *client_name = NULL;
+ u32 source_vmlist[1] = {VMID_MSS_MSA};
+ int dest_vmids[1] = {VMID_HLOS};
+ int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
for (i = 0; i < num_clients; i++) {
@@ -296,6 +299,33 @@ static int mem_share_do_ramdump(void)
continue;
}
+ if (memblock[i].hyp_mapping &&
+ memblock[i].peripheral ==
+ DHMS_MEM_PROC_MPSS_V01) {
+ pr_debug("memshare: hypervisor unmapping for client id: %d\n",
+ memblock[i].client_id);
+ if (memblock[i].alloc_request)
+ continue;
+ ret = hyp_assign_phys(
+ memblock[i].phy_addr,
+ memblock[i].size,
+ source_vmlist,
+ 1, dest_vmids,
+ dest_perms, 1);
+ if (ret) {
+ /*
+ * This is an error case: the
+ * hyp mapping was successful
+ * earlier, but the unmap
+ * failed.
+ */
+ pr_err("memshare: %s, failed to map the region to APPS\n",
+ __func__);
+ } else {
+ memblock[i].hyp_mapping = 0;
+ }
+ }
+
ramdump_segments_tmp = kcalloc(1,
sizeof(struct ramdump_segment),
GFP_KERNEL);
@@ -322,9 +352,8 @@ static int mem_share_do_ramdump(void)
static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
void *_cmd)
{
- int i;
- int ret;
- u32 source_vmlist[2] = {VMID_HLOS, VMID_MSS_MSA};
+ int i, ret, size = 0;
+ u32 source_vmlist[1] = {VMID_MSS_MSA};
int dest_vmids[1] = {VMID_HLOS};
int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
struct notif_data *notifdata = NULL;
@@ -335,6 +364,8 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
case SUBSYS_BEFORE_SHUTDOWN:
bootup_request++;
+ for (i = 0; i < MAX_CLIENTS; i++)
+ memblock[i].alloc_request = 0;
break;
case SUBSYS_RAMDUMP_NOTIFICATION:
@@ -362,6 +393,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
case SUBSYS_AFTER_POWERUP:
pr_debug("memshare: Modem has booted up\n");
for (i = 0; i < MAX_CLIENTS; i++) {
+ size = memblock[i].size;
if (memblock[i].free_memory > 0 &&
bootup_request >= 2) {
memblock[i].free_memory -= 1;
@@ -370,18 +402,20 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
memblock[i].client_id);
}
- if (memblock[i].free_memory == 0) {
- if (memblock[i].peripheral ==
- DHMS_MEM_PROC_MPSS_V01 &&
- !memblock[i].guarantee &&
- memblock[i].allotted) {
- pr_debug("memshare: hypervisor unmapping for client id: %d\n",
- memblock[i].client_id);
+ if (memblock[i].free_memory == 0 &&
+ memblock[i].peripheral ==
+ DHMS_MEM_PROC_MPSS_V01 &&
+ !memblock[i].guarantee &&
+ memblock[i].allotted &&
+ !memblock[i].alloc_request) {
+ pr_debug("memshare: hypervisor unmapping for client id: %d\n",
+ memblock[i].client_id);
+ if (memblock[i].hyp_mapping) {
ret = hyp_assign_phys(
memblock[i].phy_addr,
memblock[i].size,
source_vmlist,
- 2, dest_vmids,
+ 1, dest_vmids,
dest_perms, 1);
if (ret &&
memblock[i].hyp_mapping == 1) {
@@ -393,17 +427,25 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
*/
pr_err("memshare: %s, failed to unmap the region\n",
__func__);
- memblock[i].hyp_mapping = 1;
} else {
memblock[i].hyp_mapping = 0;
}
- dma_free_attrs(memsh_drv->dev,
- memblock[i].size,
- memblock[i].virtual_addr,
- memblock[i].phy_addr,
- attrs);
- free_client(i);
}
+ if (memblock[i].client_id == 1) {
+ /*
+ * Client id 1 belongs to the
+ * diag client, so free the
+ * memory region of the
+ * client's size plus 4K of
+ * guard bytes.
+ */
+ size += MEMSHARE_GUARD_BYTES;
+ }
+ dma_free_attrs(memsh_drv->dev,
+ size, memblock[i].virtual_addr,
+ memblock[i].phy_addr,
+ attrs);
+ free_client(i);
}
}
bootup_request++;
@@ -425,9 +467,8 @@ static void shared_hyp_mapping(int client_id)
{
int ret;
u32 source_vmlist[1] = {VMID_HLOS};
- int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
- int dest_perms[2] = {PERM_READ|PERM_WRITE,
- PERM_READ|PERM_WRITE};
+ int dest_vmids[1] = {VMID_MSS_MSA};
+ int dest_perms[1] = {PERM_READ|PERM_WRITE};
if (client_id == DHMS_MEM_CLIENT_INVALID) {
pr_err("memshare: %s, Invalid Client\n", __func__);
@@ -437,7 +478,7 @@ static void shared_hyp_mapping(int client_id)
ret = hyp_assign_phys(memblock[client_id].phy_addr,
memblock[client_id].size,
source_vmlist, 1, dest_vmids,
- dest_perms, 2);
+ dest_perms, 1);
if (ret != 0) {
pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n",
@@ -546,6 +587,7 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
memblock[client_id].free_memory);
memblock[client_id].sequence_id = alloc_req->sequence_id;
+ memblock[client_id].alloc_request = 1;
fill_alloc_response(alloc_resp, client_id, &resp);
/*
@@ -603,9 +645,11 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
{
struct mem_free_generic_req_msg_v01 *free_req;
struct mem_free_generic_resp_msg_v01 free_resp;
- int rc;
- int flag = 0;
+ int rc, flag = 0, ret = 0, size = 0;
uint32_t client_id;
+ u32 source_vmlist[1] = {VMID_MSS_MSA};
+ int dest_vmids[1] = {VMID_HLOS};
+ int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
mutex_lock(&memsh_drv->mem_free);
free_req = (struct mem_free_generic_req_msg_v01 *)req;
@@ -624,7 +668,29 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
memblock[client_id].allotted) {
pr_debug("memshare: %s: size: %d",
__func__, memblock[client_id].size);
- dma_free_attrs(memsh_drv->dev, memblock[client_id].size,
+ ret = hyp_assign_phys(memblock[client_id].phy_addr,
+ memblock[client_id].size, source_vmlist, 1,
+ dest_vmids, dest_perms, 1);
+ if (ret && memblock[client_id].hyp_mapping == 1) {
+ /*
+ * This is an error case: hyp mapping was successful
+ * earlier, but the unmap failed.
+ */
+ pr_err("memshare: %s, failed to unmap the region\n",
+ __func__);
+ }
+ size = memblock[client_id].size;
+ if (memblock[client_id].client_id == 1) {
+ /*
+ * Client id 1 belongs to the
+ * diag client, so free the
+ * memory region of the
+ * client's size plus 4K of
+ * guard bytes.
+ */
+ size += MEMSHARE_GUARD_BYTES;
+ }
+ dma_free_attrs(memsh_drv->dev, size,
memblock[client_id].virtual_addr,
memblock[client_id].phy_addr,
attrs);
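A hedged sketch (not part of this patch) consolidating the unmap-then-free pattern repeated above in mem_share_do_ramdump(), modem_notifier_cb(), and handle_free_generic_req(); the helper name is a placeholder, and the hyp_assign_phys() arguments follow the calls shown in this patch.

/* Illustrative helper only: return an MSS-owned region to HLOS, then free it. */
static void example_unmap_and_free(struct mem_blocks *blk)
{
	u32 source_vmlist[1] = {VMID_MSS_MSA};
	int dest_vmids[1] = {VMID_HLOS};
	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
	uint32_t size = blk->size;

	if (hyp_assign_phys(blk->phy_addr, blk->size, source_vmlist, 1,
			dest_vmids, dest_perms, 1))
		pr_err("memshare: failed to unmap the region\n");
	else
		blk->hyp_mapping = 0;

	/* Diag (client id 1) regions carry 4K of guard bytes */
	if (blk->client_id == 1)
		size += MEMSHARE_GUARD_BYTES;

	dma_free_attrs(memsh_drv->dev, size, blk->virtual_addr,
		blk->phy_addr, attrs);
}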
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
index ca11137aeff1..6b546528404c 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.h
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,8 @@ struct mem_blocks {
uint32_t guarantee;
/* Memory alloted or not */
uint32_t allotted;
+ /* Memory allocation request received or not */
+ uint32_t alloc_request;
/* Size required for client */
uint32_t size;
/*
diff --git a/include/uapi/media/cam_icp.h b/include/uapi/media/cam_icp.h
index cd2d2d297003..680d05b630a6 100644
--- a/include/uapi/media/cam_icp.h
+++ b/include/uapi/media/cam_icp.h
@@ -59,8 +59,9 @@
/* Command meta types */
#define CAM_ICP_CMD_META_GENERIC_BLOB 0x1
-/* Generic blon types */
+/* Generic blob types */
#define CAM_ICP_CMD_GENERIC_BLOB_CLK 0x1
+#define CAM_ICP_CMD_GENERIC_BLOB_CFG_IO 0x2
/**
* struct cam_icp_clk_bw_request
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 41481dc0d678..d397432023be 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1902,6 +1902,12 @@ static void __migrate_timers(unsigned int cpu, bool remove_pinned)
spin_lock_irqsave(&new_base->lock, flags);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ /*
+ * The current CPUs base clock might be stale. Update it
+ * before moving the timers over.
+ */
+ forward_timer_base(new_base);
+
if (!cpu_online(cpu))
BUG_ON(old_base->running_timer);